2024-11-27 16:20:48,089 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-27 16:20:48,101 main DEBUG Took 0.010334 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-27 16:20:48,101 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-27 16:20:48,101 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-27 16:20:48,102 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-27 16:20:48,103 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 16:20:48,110 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-27 16:20:48,121 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 16:20:48,122 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 16:20:48,123 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 16:20:48,123 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 16:20:48,124 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 16:20:48,124 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 16:20:48,125 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 16:20:48,125 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 16:20:48,126 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 16:20:48,126 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 16:20:48,127 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 16:20:48,127 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 16:20:48,127 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 16:20:48,127 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-27 16:20:48,128 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 16:20:48,128 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 16:20:48,129 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 16:20:48,129 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 16:20:48,129 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 16:20:48,129 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 16:20:48,130 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 16:20:48,130 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 16:20:48,130 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 16:20:48,131 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 16:20:48,131 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 16:20:48,131 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-27 16:20:48,133 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 16:20:48,134 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-27 16:20:48,136 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-27 16:20:48,136 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
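Editor's note: the LoggerConfig builders in the entries above correspond to per-package log levels coming from the log4j2.properties packaged in the test jar (its URI appears further down, at the "Reconfiguration complete" entry). A roughly equivalent programmatic form is sketched below purely for illustration; it assumes Log4j2's Configurator API and is not part of the test source.

  import org.apache.logging.log4j.Level;
  import org.apache.logging.log4j.core.config.Configurator;

  public class HBaseTestLogLevelsSketch {
      // Mirrors the levels built by the PropertiesConfiguration entries above.
      public static void apply() {
          Configurator.setRootLevel(Level.INFO);                                   // root = INFO,Console
          Configurator.setLevel("org.apache.hadoop", Level.WARN);
          Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
          Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
          Configurator.setLevel("org.apache.hadoop.metrics2.util.MBeans", Level.ERROR);
          Configurator.setLevel("org.apache.hadoop.metrics2.impl.MetricsConfig", Level.WARN);
          Configurator.setLevel("org.apache.hadoop.metrics2.impl.MetricsSystemImpl", Level.ERROR);
          Configurator.setLevel("org.apache.hbase.thirdparty.io.netty.channel", Level.DEBUG);
      }
  }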
2024-11-27 16:20:48,137 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-27 16:20:48,137 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-27 16:20:48,145 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-27 16:20:48,148 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-27 16:20:48,149 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-27 16:20:48,150 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-27 16:20:48,150 main DEBUG createAppenders(={Console}) 2024-11-27 16:20:48,151 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-27 16:20:48,151 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-27 16:20:48,151 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-27 16:20:48,152 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-27 16:20:48,152 main DEBUG OutputStream closed 2024-11-27 16:20:48,153 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-27 16:20:48,153 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-27 16:20:48,153 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-27 16:20:48,220 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-27 16:20:48,222 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-27 16:20:48,223 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-27 16:20:48,224 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-27 16:20:48,225 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-27 16:20:48,225 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-27 16:20:48,225 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-27 16:20:48,226 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-27 16:20:48,226 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-27 16:20:48,226 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-27 16:20:48,227 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-27 16:20:48,227 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-27 16:20:48,227 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-27 16:20:48,227 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-27 16:20:48,228 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-27 16:20:48,228 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-27 16:20:48,228 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-27 16:20:48,229 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-27 16:20:48,231 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-27 16:20:48,232 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-27 16:20:48,232 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-27 16:20:48,232 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-27T16:20:48,475 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263 2024-11-27 16:20:48,478 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-27 16:20:48,478 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
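Editor's note: at this point the log switches from Log4j initialization to the test itself. HBaseTestingUtility(348) sets hbase.rootdir, and the entries that follow show HBaseClassTestRule applying a 13-minute timeout to org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy and a mini cluster starting with numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1. A minimal sketch of test scaffolding that would typically produce this startup sequence is shown below; it assumes the HBase 2.x testing API, only the option values are taken from the log, and the class name and test category are hypothetical (this is not the actual test source).

  import org.apache.hadoop.hbase.HBaseClassTestRule;
  import org.apache.hadoop.hbase.HBaseTestingUtility;
  import org.apache.hadoop.hbase.StartMiniClusterOption;
  import org.apache.hadoop.hbase.testclassification.LargeTests;
  import org.junit.AfterClass;
  import org.junit.BeforeClass;
  import org.junit.ClassRule;
  import org.junit.experimental.categories.Category;

  @Category(LargeTests.class)   // category assumed; HBaseClassTestRule derives the class timeout from it
  public class MiniClusterStartupSketch {
      @ClassRule
      public static final HBaseClassTestRule CLASS_RULE =
          HBaseClassTestRule.forClass(MiniClusterStartupSketch.class);

      private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUp() throws Exception {
          // Matches StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1}
          TEST_UTIL.startMiniCluster(StartMiniClusterOption.builder()
              .numMasters(1).numRegionServers(1).numDataNodes(1).numZkServers(1).build());
      }

      @AfterClass
      public static void tearDown() throws Exception {
          TEST_UTIL.shutdownMiniCluster();
      }
  }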
2024-11-27T16:20:48,487 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins 2024-11-27T16:20:48,506 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-27T16:20:48,509 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/cluster_6cd18b6a-993a-7aed-ac6c-30d6060ee827, deleteOnExit=true 2024-11-27T16:20:48,509 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-27T16:20:48,510 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/test.cache.data in system properties and HBase conf 2024-11-27T16:20:48,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/hadoop.tmp.dir in system properties and HBase conf 2024-11-27T16:20:48,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/hadoop.log.dir in system properties and HBase conf 2024-11-27T16:20:48,512 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-27T16:20:48,512 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-27T16:20:48,512 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-27T16:20:48,605 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-27T16:20:48,703 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-27T16:20:48,706 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-27T16:20:48,707 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-27T16:20:48,707 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-27T16:20:48,708 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-27T16:20:48,708 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-27T16:20:48,709 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-27T16:20:48,709 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-27T16:20:48,709 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-27T16:20:48,710 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-27T16:20:48,710 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/nfs.dump.dir in system properties and HBase conf 2024-11-27T16:20:48,710 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/java.io.tmpdir in system properties and HBase conf 2024-11-27T16:20:48,711 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-27T16:20:48,711 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-27T16:20:48,712 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-27T16:20:49,556 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-27T16:20:49,630 INFO [Time-limited test {}] log.Log(170): Logging initialized @2315ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-27T16:20:49,703 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T16:20:49,773 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T16:20:49,794 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T16:20:49,794 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T16:20:49,795 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-27T16:20:49,808 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T16:20:49,810 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@106ffc0e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/hadoop.log.dir/,AVAILABLE} 2024-11-27T16:20:49,811 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@704acb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T16:20:50,005 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6904431c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/java.io.tmpdir/jetty-localhost-38615-hadoop-hdfs-3_4_1-tests_jar-_-any-17858679054006349941/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-27T16:20:50,015 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20178447{HTTP/1.1, (http/1.1)}{localhost:38615} 2024-11-27T16:20:50,016 INFO [Time-limited test {}] server.Server(415): Started @2702ms 2024-11-27T16:20:50,426 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T16:20:50,434 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T16:20:50,436 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T16:20:50,437 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T16:20:50,437 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-27T16:20:50,438 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ac85cee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/hadoop.log.dir/,AVAILABLE} 2024-11-27T16:20:50,438 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74536f23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T16:20:50,565 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@29607158{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/java.io.tmpdir/jetty-localhost-43689-hadoop-hdfs-3_4_1-tests_jar-_-any-3588525600276456733/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T16:20:50,566 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@76b7aca8{HTTP/1.1, (http/1.1)}{localhost:43689} 2024-11-27T16:20:50,566 INFO [Time-limited test {}] server.Server(415): Started @3253ms 2024-11-27T16:20:50,625 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-27T16:20:51,095 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/cluster_6cd18b6a-993a-7aed-ac6c-30d6060ee827/dfs/data/data1/current/BP-632509396-172.17.0.2-1732724449322/current, will proceed with Du for space computation calculation, 2024-11-27T16:20:51,095 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/cluster_6cd18b6a-993a-7aed-ac6c-30d6060ee827/dfs/data/data2/current/BP-632509396-172.17.0.2-1732724449322/current, will proceed with Du for space computation calculation, 2024-11-27T16:20:51,137 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-27T16:20:51,200 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x733f6145784f7256 with lease ID 0x56bb3e393ae27729: Processing first storage report for DS-7ec29dd9-dfa7-441f-844a-fb8335fac6e4 from datanode DatanodeRegistration(127.0.0.1:46329, datanodeUuid=347a9132-bc61-40f9-b233-195ce5f257aa, infoPort=36965, infoSecurePort=0, ipcPort=38657, storageInfo=lv=-57;cid=testClusterID;nsid=1949686029;c=1732724449322) 2024-11-27T16:20:51,202 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x733f6145784f7256 with lease ID 0x56bb3e393ae27729: from storage DS-7ec29dd9-dfa7-441f-844a-fb8335fac6e4 node DatanodeRegistration(127.0.0.1:46329, datanodeUuid=347a9132-bc61-40f9-b233-195ce5f257aa, infoPort=36965, infoSecurePort=0, ipcPort=38657, storageInfo=lv=-57;cid=testClusterID;nsid=1949686029;c=1732724449322), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-27T16:20:51,202 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x733f6145784f7256 with lease ID 0x56bb3e393ae27729: Processing first storage report for DS-2abaa7bf-57a2-40cb-a86c-87be8d65de22 from datanode DatanodeRegistration(127.0.0.1:46329, datanodeUuid=347a9132-bc61-40f9-b233-195ce5f257aa, infoPort=36965, infoSecurePort=0, ipcPort=38657, storageInfo=lv=-57;cid=testClusterID;nsid=1949686029;c=1732724449322) 2024-11-27T16:20:51,202 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x733f6145784f7256 with lease ID 0x56bb3e393ae27729: from storage DS-2abaa7bf-57a2-40cb-a86c-87be8d65de22 node DatanodeRegistration(127.0.0.1:46329, datanodeUuid=347a9132-bc61-40f9-b233-195ce5f257aa, infoPort=36965, infoSecurePort=0, ipcPort=38657, storageInfo=lv=-57;cid=testClusterID;nsid=1949686029;c=1732724449322), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T16:20:51,296 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263 2024-11-27T16:20:51,377 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/cluster_6cd18b6a-993a-7aed-ac6c-30d6060ee827/zookeeper_0, clientPort=51088, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/cluster_6cd18b6a-993a-7aed-ac6c-30d6060ee827/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/cluster_6cd18b6a-993a-7aed-ac6c-30d6060ee827/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-27T16:20:51,387 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=51088 2024-11-27T16:20:51,401 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T16:20:51,406 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T16:20:51,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741825_1001 (size=7) 2024-11-27T16:20:52,096 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59 with version=8 2024-11-27T16:20:52,097 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/hbase-staging 2024-11-27T16:20:52,236 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-27T16:20:52,506 INFO [Time-limited test {}] client.ConnectionUtils(129): master/7b191dec6496:0 server-side Connection retries=45 2024-11-27T16:20:52,526 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T16:20:52,527 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-27T16:20:52,527 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-27T16:20:52,527 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T16:20:52,527 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, 
handlerCount=1 2024-11-27T16:20:52,660 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-27T16:20:52,720 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-27T16:20:52,729 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-27T16:20:52,733 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-27T16:20:52,760 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 22043 (auto-detected) 2024-11-27T16:20:52,761 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-27T16:20:52,780 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41377 2024-11-27T16:20:52,788 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T16:20:52,790 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T16:20:52,803 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:41377 connecting to ZooKeeper ensemble=127.0.0.1:51088 2024-11-27T16:20:52,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:413770x0, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-27T16:20:52,838 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41377-0x10039c8a1050000 connected 2024-11-27T16:20:52,870 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-27T16:20:52,873 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T16:20:52,876 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-27T16:20:52,880 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41377 2024-11-27T16:20:52,881 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41377 2024-11-27T16:20:52,883 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41377 2024-11-27T16:20:52,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41377 2024-11-27T16:20:52,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): 
Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41377 2024-11-27T16:20:52,895 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59, hbase.cluster.distributed=false 2024-11-27T16:20:52,968 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7b191dec6496:0 server-side Connection retries=45 2024-11-27T16:20:52,968 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T16:20:52,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-27T16:20:52,969 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-27T16:20:52,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T16:20:52,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-27T16:20:52,972 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-27T16:20:52,975 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-27T16:20:52,976 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44169 2024-11-27T16:20:52,978 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-27T16:20:52,983 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-27T16:20:52,985 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T16:20:52,990 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T16:20:52,994 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:44169 connecting to ZooKeeper ensemble=127.0.0.1:51088 2024-11-27T16:20:52,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:441690x0, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-27T16:20:53,000 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:441690x0, quorum=127.0.0.1:51088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-27T16:20:53,000 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44169-0x10039c8a1050001 connected 2024-11-27T16:20:53,002 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T16:20:53,003 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-27T16:20:53,006 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44169 2024-11-27T16:20:53,007 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44169 2024-11-27T16:20:53,007 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44169 2024-11-27T16:20:53,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44169 2024-11-27T16:20:53,010 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44169 2024-11-27T16:20:53,013 INFO [master/7b191dec6496:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/7b191dec6496,41377,1732724452229 2024-11-27T16:20:53,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T16:20:53,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T16:20:53,022 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7b191dec6496,41377,1732724452229 2024-11-27T16:20:53,030 DEBUG [M:0;7b191dec6496:41377 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7b191dec6496:41377 2024-11-27T16:20:53,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-27T16:20:53,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-27T16:20:53,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:20:53,052 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-27T16:20:53,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase 2024-11-27T16:20:53,053 INFO [master/7b191dec6496:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7b191dec6496,41377,1732724452229 from backup master directory 2024-11-27T16:20:53,053 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-27T16:20:53,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7b191dec6496,41377,1732724452229 2024-11-27T16:20:53,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T16:20:53,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T16:20:53,057 WARN [master/7b191dec6496:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-27T16:20:53,058 INFO [master/7b191dec6496:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7b191dec6496,41377,1732724452229 2024-11-27T16:20:53,060 INFO [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-27T16:20:53,061 INFO [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-27T16:20:53,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741826_1002 (size=42) 2024-11-27T16:20:53,541 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/hbase.id with ID: 35b8a79a-cf94-4fc3-8606-c6554b15a17c 2024-11-27T16:20:53,580 INFO [master/7b191dec6496:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T16:20:53,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:20:53,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:20:53,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741827_1003 (size=196) 2024-11-27T16:20:53,644 INFO [master/7b191dec6496:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES 
=> {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-27T16:20:53,646 INFO [master/7b191dec6496:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-27T16:20:53,669 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
	at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at java.lang.Class.forName0(Native Method) ~[?:?]
	at java.lang.Class.forName(Class.java:375) ~[?:?]
	at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:20:53,674 INFO [master/7b191dec6496:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-27T16:20:53,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741828_1004 (size=1189)
2024-11-27T16:20:54,125 INFO [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store
2024-11-27T16:20:54,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741829_1005 (size=34)
2024-11-27T16:20:54,548 INFO [master/7b191dec6496:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
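Editor's note: the regionserver.HRegion(7124) entry above prints the full table descriptor of the local 'master:store' region, with its four column families (info, proc, rs, state). For orientation, the 'info' family attributes shown there map onto HBase's public descriptor builders roughly as sketched below; this is illustrative only, the table name 'example_store' and the class name are hypothetical, and only the attribute values are taken from the log.

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class DescriptorSketch {
      public static TableDescriptor build() {
          // Mirrors the 'info' family: VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom filter,
          // IN_MEMORY=true, BLOCKSIZE=8192, no compression.
          ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3)
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
              .setBloomFilterType(BloomType.ROWCOL)
              .setInMemory(true)
              .setBlocksize(8192)
              .build();
          // 'proc', 'rs' and 'state' use the defaults shown in the log (VERSIONS=1, ROW bloom, 64 KB blocks).
          return TableDescriptorBuilder
              .newBuilder(TableName.valueOf("example_store"))   // hypothetical table name
              .setColumnFamily(info)
              .build();
      }
  }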
2024-11-27T16:20:54,549 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:20:54,550 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-27T16:20:54,550 INFO [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T16:20:54,551 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T16:20:54,551 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-27T16:20:54,551 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T16:20:54,551 INFO [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T16:20:54,551 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-27T16:20:54,554 WARN [master/7b191dec6496:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/.initializing 2024-11-27T16:20:54,554 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/WALs/7b191dec6496,41377,1732724452229 2024-11-27T16:20:54,562 INFO [master/7b191dec6496:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-27T16:20:54,574 INFO [master/7b191dec6496:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b191dec6496%2C41377%2C1732724452229, suffix=, logDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/WALs/7b191dec6496,41377,1732724452229, archiveDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/oldWALs, maxLogs=10 2024-11-27T16:20:54,596 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/WALs/7b191dec6496,41377,1732724452229/7b191dec6496%2C41377%2C1732724452229.1732724454578, exclude list is [], retry=0 2024-11-27T16:20:54,612 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46329,DS-7ec29dd9-dfa7-441f-844a-fb8335fac6e4,DISK] 2024-11-27T16:20:54,615 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-27T16:20:54,653 INFO [master/7b191dec6496:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/WALs/7b191dec6496,41377,1732724452229/7b191dec6496%2C41377%2C1732724452229.1732724454578 2024-11-27T16:20:54,654 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36965:36965)] 2024-11-27T16:20:54,654 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-27T16:20:54,655 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:20:54,658 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-27T16:20:54,659 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-27T16:20:54,696 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-27T16:20:54,720 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-27T16:20:54,724 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:54,727 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T16:20:54,727 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-27T16:20:54,731 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-27T16:20:54,731 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:54,732 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:20:54,732 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-27T16:20:54,734 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-27T16:20:54,735 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:54,736 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:20:54,736 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-27T16:20:54,738 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-27T16:20:54,738 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:54,739 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:20:54,742 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-27T16:20:54,743 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-27T16:20:54,752 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-27T16:20:54,755 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-27T16:20:54,770 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T16:20:54,772 INFO [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71586426, jitterRate=0.0667208731174469}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-27T16:20:54,776 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-27T16:20:54,777 INFO [master/7b191dec6496:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-27T16:20:54,805 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1eba6fcf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:20:54,839 INFO [master/7b191dec6496:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-11-27T16:20:54,851 INFO [master/7b191dec6496:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-27T16:20:54,851 INFO [master/7b191dec6496:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-27T16:20:54,853 INFO [master/7b191dec6496:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-27T16:20:54,855 INFO [master/7b191dec6496:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-27T16:20:54,860 INFO [master/7b191dec6496:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-11-27T16:20:54,860 INFO [master/7b191dec6496:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-27T16:20:54,885 INFO [master/7b191dec6496:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-27T16:20:54,897 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-27T16:20:54,899 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-27T16:20:54,901 INFO [master/7b191dec6496:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-27T16:20:54,903 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-27T16:20:54,904 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-27T16:20:54,906 INFO [master/7b191dec6496:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-27T16:20:54,909 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-27T16:20:54,911 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-27T16:20:54,912 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-27T16:20:54,914 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-27T16:20:54,923 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-27T16:20:54,924 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-27T16:20:54,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-27T16:20:54,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-27T16:20:54,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:20:54,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:20:54,929 INFO [master/7b191dec6496:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=7b191dec6496,41377,1732724452229, sessionid=0x10039c8a1050000, setting cluster-up flag (Was=false) 2024-11-27T16:20:54,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:20:54,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:20:54,949 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-27T16:20:54,950 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b191dec6496,41377,1732724452229 2024-11-27T16:20:54,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:20:54,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:20:54,964 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-27T16:20:54,965 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b191dec6496,41377,1732724452229 2024-11-27T16:20:55,026 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7b191dec6496:44169 2024-11-27T16:20:55,027 INFO 
[RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1008): ClusterId : 35b8a79a-cf94-4fc3-8606-c6554b15a17c 2024-11-27T16:20:55,030 DEBUG [RS:0;7b191dec6496:44169 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-27T16:20:55,035 DEBUG [RS:0;7b191dec6496:44169 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-27T16:20:55,035 DEBUG [RS:0;7b191dec6496:44169 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-27T16:20:55,038 DEBUG [RS:0;7b191dec6496:44169 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-27T16:20:55,039 DEBUG [RS:0;7b191dec6496:44169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ef4a5bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:20:55,041 DEBUG [RS:0;7b191dec6496:44169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f5aa446, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b191dec6496/172.17.0.2:0 2024-11-27T16:20:55,044 INFO [RS:0;7b191dec6496:44169 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-27T16:20:55,044 INFO [RS:0;7b191dec6496:44169 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-27T16:20:55,045 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-27T16:20:55,047 INFO [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(3073): reportForDuty to master=7b191dec6496,41377,1732724452229 with isa=7b191dec6496/172.17.0.2:44169, startcode=1732724452967 2024-11-27T16:20:55,052 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-27T16:20:55,059 INFO [master/7b191dec6496:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-27T16:20:55,059 DEBUG [RS:0;7b191dec6496:44169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-27T16:20:55,063 INFO [master/7b191dec6496:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-27T16:20:55,070 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7b191dec6496,41377,1732724452229 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-27T16:20:55,075 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7b191dec6496:0, corePoolSize=5, maxPoolSize=5 2024-11-27T16:20:55,075 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7b191dec6496:0, corePoolSize=5, maxPoolSize=5 2024-11-27T16:20:55,076 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7b191dec6496:0, corePoolSize=5, maxPoolSize=5 2024-11-27T16:20:55,076 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7b191dec6496:0, corePoolSize=5, maxPoolSize=5 2024-11-27T16:20:55,076 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7b191dec6496:0, corePoolSize=10, maxPoolSize=10 2024-11-27T16:20:55,076 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7b191dec6496:0, corePoolSize=1, maxPoolSize=1 2024-11-27T16:20:55,077 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7b191dec6496:0, corePoolSize=2, maxPoolSize=2 2024-11-27T16:20:55,077 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7b191dec6496:0, corePoolSize=1, maxPoolSize=1 2024-11-27T16:20:55,083 INFO [master/7b191dec6496:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732724485083 2024-11-27T16:20:55,085 INFO [master/7b191dec6496:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-27T16:20:55,087 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-27T16:20:55,090 INFO [master/7b191dec6496:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-27T16:20:55,090 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-27T16:20:55,094 INFO [master/7b191dec6496:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-27T16:20:55,095 INFO [master/7b191dec6496:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-27T16:20:55,095 INFO [master/7b191dec6496:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-27T16:20:55,095 INFO [master/7b191dec6496:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-27T16:20:55,095 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:55,095 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-27T16:20:55,097 INFO [master/7b191dec6496:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:55,099 INFO [master/7b191dec6496:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-27T16:20:55,100 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37737, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-27T16:20:55,101 INFO [master/7b191dec6496:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-27T16:20:55,101 INFO [master/7b191dec6496:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-27T16:20:55,107 INFO [master/7b191dec6496:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-27T16:20:55,108 INFO [master/7b191dec6496:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-27T16:20:55,108 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41377 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:55,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741831_1007 (size=1039) 2024-11-27T16:20:55,112 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7b191dec6496:0:becomeActiveMaster-HFileCleaner.large.0-1732724455109,5,FailOnTimeoutGroup] 2024-11-27T16:20:55,112 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-27T16:20:55,113 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59 2024-11-27T16:20:55,120 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7b191dec6496:0:becomeActiveMaster-HFileCleaner.small.0-1732724455112,5,FailOnTimeoutGroup] 2024-11-27T16:20:55,120 INFO [master/7b191dec6496:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:55,120 INFO [master/7b191dec6496:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-11-27T16:20:55,122 INFO [master/7b191dec6496:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:55,122 INFO [master/7b191dec6496:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:55,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741832_1008 (size=32) 2024-11-27T16:20:55,137 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-11-27T16:20:55,137 WARN [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-27T16:20:55,239 INFO [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(3073): reportForDuty to master=7b191dec6496,41377,1732724452229 with isa=7b191dec6496/172.17.0.2:44169, startcode=1732724452967 2024-11-27T16:20:55,241 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41377 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7b191dec6496,44169,1732724452967 2024-11-27T16:20:55,244 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41377 {}] master.ServerManager(486): Registering regionserver=7b191dec6496,44169,1732724452967 2024-11-27T16:20:55,254 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59 2024-11-27T16:20:55,254 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34065 2024-11-27T16:20:55,254 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-27T16:20:55,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-27T16:20:55,259 DEBUG [RS:0;7b191dec6496:44169 {}] zookeeper.ZKUtil(111): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7b191dec6496,44169,1732724452967 2024-11-27T16:20:55,259 WARN [RS:0;7b191dec6496:44169 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-27T16:20:55,259 INFO [RS:0;7b191dec6496:44169 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-27T16:20:55,260 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/WALs/7b191dec6496,44169,1732724452967 2024-11-27T16:20:55,261 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7b191dec6496,44169,1732724452967] 2024-11-27T16:20:55,273 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-27T16:20:55,285 INFO [RS:0;7b191dec6496:44169 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-27T16:20:55,297 INFO [RS:0;7b191dec6496:44169 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-27T16:20:55,299 INFO [RS:0;7b191dec6496:44169 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-27T16:20:55,300 INFO [RS:0;7b191dec6496:44169 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:55,300 INFO [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-27T16:20:55,307 INFO [RS:0;7b191dec6496:44169 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-27T16:20:55,307 DEBUG [RS:0;7b191dec6496:44169 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7b191dec6496:0, corePoolSize=1, maxPoolSize=1 2024-11-27T16:20:55,307 DEBUG [RS:0;7b191dec6496:44169 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7b191dec6496:0, corePoolSize=1, maxPoolSize=1 2024-11-27T16:20:55,307 DEBUG [RS:0;7b191dec6496:44169 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0, corePoolSize=1, maxPoolSize=1 2024-11-27T16:20:55,308 DEBUG [RS:0;7b191dec6496:44169 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7b191dec6496:0, corePoolSize=1, maxPoolSize=1 2024-11-27T16:20:55,308 DEBUG [RS:0;7b191dec6496:44169 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7b191dec6496:0, corePoolSize=1, maxPoolSize=1 2024-11-27T16:20:55,308 DEBUG [RS:0;7b191dec6496:44169 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7b191dec6496:0, corePoolSize=2, maxPoolSize=2 2024-11-27T16:20:55,308 DEBUG [RS:0;7b191dec6496:44169 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0, corePoolSize=1, maxPoolSize=1 2024-11-27T16:20:55,308 DEBUG [RS:0;7b191dec6496:44169 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7b191dec6496:0, corePoolSize=1, maxPoolSize=1 2024-11-27T16:20:55,308 DEBUG [RS:0;7b191dec6496:44169 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7b191dec6496:0, corePoolSize=1, maxPoolSize=1 2024-11-27T16:20:55,308 DEBUG [RS:0;7b191dec6496:44169 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7b191dec6496:0, corePoolSize=1, maxPoolSize=1 2024-11-27T16:20:55,309 DEBUG [RS:0;7b191dec6496:44169 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7b191dec6496:0, corePoolSize=1, maxPoolSize=1 2024-11-27T16:20:55,309 DEBUG [RS:0;7b191dec6496:44169 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7b191dec6496:0, corePoolSize=3, maxPoolSize=3 2024-11-27T16:20:55,309 DEBUG [RS:0;7b191dec6496:44169 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0, corePoolSize=3, maxPoolSize=3 2024-11-27T16:20:55,312 INFO [RS:0;7b191dec6496:44169 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:55,312 INFO [RS:0;7b191dec6496:44169 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:55,312 INFO [RS:0;7b191dec6496:44169 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:55,312 INFO [RS:0;7b191dec6496:44169 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:55,312 INFO [RS:0;7b191dec6496:44169 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b191dec6496,44169,1732724452967-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-27T16:20:55,333 INFO [RS:0;7b191dec6496:44169 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-27T16:20:55,335 INFO [RS:0;7b191dec6496:44169 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b191dec6496,44169,1732724452967-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:55,354 INFO [RS:0;7b191dec6496:44169 {}] regionserver.Replication(204): 7b191dec6496,44169,1732724452967 started 2024-11-27T16:20:55,355 INFO [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1767): Serving as 7b191dec6496,44169,1732724452967, RpcServer on 7b191dec6496/172.17.0.2:44169, sessionid=0x10039c8a1050001 2024-11-27T16:20:55,355 DEBUG [RS:0;7b191dec6496:44169 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-27T16:20:55,356 DEBUG [RS:0;7b191dec6496:44169 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7b191dec6496,44169,1732724452967 2024-11-27T16:20:55,356 DEBUG [RS:0;7b191dec6496:44169 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b191dec6496,44169,1732724452967' 2024-11-27T16:20:55,356 DEBUG [RS:0;7b191dec6496:44169 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-27T16:20:55,357 DEBUG [RS:0;7b191dec6496:44169 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-27T16:20:55,357 DEBUG [RS:0;7b191dec6496:44169 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-27T16:20:55,357 DEBUG [RS:0;7b191dec6496:44169 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-27T16:20:55,357 DEBUG [RS:0;7b191dec6496:44169 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7b191dec6496,44169,1732724452967 2024-11-27T16:20:55,357 DEBUG [RS:0;7b191dec6496:44169 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b191dec6496,44169,1732724452967' 2024-11-27T16:20:55,357 DEBUG [RS:0;7b191dec6496:44169 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-27T16:20:55,358 DEBUG [RS:0;7b191dec6496:44169 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-27T16:20:55,358 DEBUG [RS:0;7b191dec6496:44169 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-27T16:20:55,359 INFO [RS:0;7b191dec6496:44169 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-27T16:20:55,359 INFO [RS:0;7b191dec6496:44169 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-27T16:20:55,464 INFO [RS:0;7b191dec6496:44169 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-27T16:20:55,469 INFO [RS:0;7b191dec6496:44169 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b191dec6496%2C44169%2C1732724452967, suffix=, logDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/WALs/7b191dec6496,44169,1732724452967, archiveDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/oldWALs, maxLogs=32 2024-11-27T16:20:55,490 DEBUG [RS:0;7b191dec6496:44169 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/WALs/7b191dec6496,44169,1732724452967/7b191dec6496%2C44169%2C1732724452967.1732724455472, exclude list is [], retry=0 2024-11-27T16:20:55,495 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46329,DS-7ec29dd9-dfa7-441f-844a-fb8335fac6e4,DISK] 2024-11-27T16:20:55,499 INFO [RS:0;7b191dec6496:44169 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/WALs/7b191dec6496,44169,1732724452967/7b191dec6496%2C44169%2C1732724452967.1732724455472 2024-11-27T16:20:55,499 DEBUG [RS:0;7b191dec6496:44169 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36965:36965)] 2024-11-27T16:20:55,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:20:55,535 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-27T16:20:55,538 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-27T16:20:55,538 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:55,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T16:20:55,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-27T16:20:55,541 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-27T16:20:55,542 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:55,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T16:20:55,543 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-27T16:20:55,545 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-27T16:20:55,545 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:55,546 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T16:20:55,547 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740 2024-11-27T16:20:55,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740 2024-11-27T16:20:55,552 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T16:20:55,555 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-27T16:20:55,559 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T16:20:55,560 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58925552, jitterRate=-0.12194085121154785}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T16:20:55,562 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-27T16:20:55,562 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-27T16:20:55,563 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-27T16:20:55,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-27T16:20:55,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-27T16:20:55,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-27T16:20:55,564 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-27T16:20:55,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-27T16:20:55,567 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-27T16:20:55,567 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-27T16:20:55,573 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-27T16:20:55,581 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-27T16:20:55,583 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-27T16:20:55,735 DEBUG [7b191dec6496:41377 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-27T16:20:55,740 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:20:55,745 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b191dec6496,44169,1732724452967, state=OPENING 2024-11-27T16:20:55,751 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-27T16:20:55,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:20:55,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:20:55,753 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T16:20:55,753 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T16:20:55,755 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:20:55,929 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:20:55,931 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-27T16:20:55,934 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41048, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-27T16:20:55,945 INFO [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-27T16:20:55,945 INFO [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-27T16:20:55,946 INFO [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-27T16:20:55,950 INFO [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b191dec6496%2C44169%2C1732724452967.meta, suffix=.meta, logDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/WALs/7b191dec6496,44169,1732724452967, archiveDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/oldWALs, maxLogs=32 2024-11-27T16:20:55,974 DEBUG [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/WALs/7b191dec6496,44169,1732724452967/7b191dec6496%2C44169%2C1732724452967.meta.1732724455952.meta, exclude list is [], retry=0 2024-11-27T16:20:55,978 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46329,DS-7ec29dd9-dfa7-441f-844a-fb8335fac6e4,DISK] 2024-11-27T16:20:55,981 INFO [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/WALs/7b191dec6496,44169,1732724452967/7b191dec6496%2C44169%2C1732724452967.meta.1732724455952.meta 2024-11-27T16:20:55,982 DEBUG 
[RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36965:36965)] 2024-11-27T16:20:55,982 DEBUG [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-27T16:20:55,984 DEBUG [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-27T16:20:56,045 DEBUG [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-27T16:20:56,050 INFO [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-27T16:20:56,054 DEBUG [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-27T16:20:56,054 DEBUG [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:20:56,055 DEBUG [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-27T16:20:56,055 DEBUG [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-27T16:20:56,058 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-27T16:20:56,060 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-27T16:20:56,060 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:56,061 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T16:20:56,061 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-27T16:20:56,063 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-27T16:20:56,063 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:56,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T16:20:56,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-27T16:20:56,065 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-27T16:20:56,065 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:56,066 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T16:20:56,067 DEBUG [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740 2024-11-27T16:20:56,070 DEBUG [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, 
pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740 2024-11-27T16:20:56,073 DEBUG [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T16:20:56,076 DEBUG [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-27T16:20:56,078 INFO [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64706077, jitterRate=-0.03580431640148163}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T16:20:56,080 DEBUG [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-27T16:20:56,090 INFO [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732724455924 2024-11-27T16:20:56,102 DEBUG [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-27T16:20:56,102 INFO [RS_OPEN_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-27T16:20:56,103 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:20:56,106 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b191dec6496,44169,1732724452967, state=OPEN 2024-11-27T16:20:56,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-27T16:20:56,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-27T16:20:56,111 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T16:20:56,111 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T16:20:56,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-27T16:20:56,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=7b191dec6496,44169,1732724452967 in 356 msec 2024-11-27T16:20:56,122 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-27T16:20:56,123 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 545 msec 2024-11-27T16:20:56,129 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.1270 sec 2024-11-27T16:20:56,129 INFO [master/7b191dec6496:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732724456129, completionTime=-1 2024-11-27T16:20:56,130 INFO [master/7b191dec6496:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-27T16:20:56,130 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-27T16:20:56,171 DEBUG [hconnection-0x52e4f869-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:20:56,174 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41060, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:20:56,185 INFO [master/7b191dec6496:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-27T16:20:56,186 INFO [master/7b191dec6496:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732724516185 2024-11-27T16:20:56,186 INFO [master/7b191dec6496:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732724576186 2024-11-27T16:20:56,186 INFO [master/7b191dec6496:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 55 msec 2024-11-27T16:20:56,207 INFO [master/7b191dec6496:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b191dec6496,41377,1732724452229-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:56,207 INFO [master/7b191dec6496:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b191dec6496,41377,1732724452229-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:56,208 INFO [master/7b191dec6496:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b191dec6496,41377,1732724452229-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:56,209 INFO [master/7b191dec6496:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7b191dec6496:41377, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:56,209 INFO [master/7b191dec6496:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-27T16:20:56,215 DEBUG [master/7b191dec6496:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-27T16:20:56,217 INFO [master/7b191dec6496:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
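(Editor's note, not part of the log: at this point the master has finished InitMetaProcedure, seen its single region server report in, joined the cluster, and started its periodic chores, and is about to create the namespace table. The same "one live region server, master running" state can be confirmed from a client through the public Admin API. The Java sketch below is illustrative only; the class and variable names are mine and it assumes an hbase-site.xml pointing at this cluster.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStateCheck {                        // hypothetical helper, not from the test
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          // Mirrors the master's own wait above: one live region server expected in this mini cluster.
          System.out.println("active master  = " + metrics.getMasterName());
          System.out.println("live servers   = " + metrics.getLiveServerMetrics().size());
          System.out.println("regions online = " + metrics.getRegionCount());
        }
      }
    }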
2024-11-27T16:20:56,219 INFO [master/7b191dec6496:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-27T16:20:56,225 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-27T16:20:56,228 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T16:20:56,229 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:56,231 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T16:20:56,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741835_1011 (size=358) 2024-11-27T16:20:56,646 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => fb56c9d94acc1b64bf2472d65ab81174, NAME => 'hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59 2024-11-27T16:20:56,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741836_1012 (size=42) 2024-11-27T16:20:57,058 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:20:57,058 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing fb56c9d94acc1b64bf2472d65ab81174, disabling compactions & flushes 2024-11-27T16:20:57,058 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174. 2024-11-27T16:20:57,058 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174. 2024-11-27T16:20:57,058 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174. 
after waiting 0 ms 2024-11-27T16:20:57,058 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174. 2024-11-27T16:20:57,058 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174. 2024-11-27T16:20:57,058 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for fb56c9d94acc1b64bf2472d65ab81174: 2024-11-27T16:20:57,061 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T16:20:57,067 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732724457062"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732724457062"}]},"ts":"1732724457062"} 2024-11-27T16:20:57,091 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-27T16:20:57,093 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T16:20:57,096 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724457094"}]},"ts":"1732724457094"} 2024-11-27T16:20:57,101 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-27T16:20:57,107 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=fb56c9d94acc1b64bf2472d65ab81174, ASSIGN}] 2024-11-27T16:20:57,110 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=fb56c9d94acc1b64bf2472d65ab81174, ASSIGN 2024-11-27T16:20:57,111 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=fb56c9d94acc1b64bf2472d65ab81174, ASSIGN; state=OFFLINE, location=7b191dec6496,44169,1732724452967; forceNewPlan=false, retain=false 2024-11-27T16:20:57,262 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=fb56c9d94acc1b64bf2472d65ab81174, regionState=OPENING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:20:57,267 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure fb56c9d94acc1b64bf2472d65ab81174, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:20:57,421 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:20:57,427 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174. 2024-11-27T16:20:57,428 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => fb56c9d94acc1b64bf2472d65ab81174, NAME => 'hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174.', STARTKEY => '', ENDKEY => ''} 2024-11-27T16:20:57,428 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace fb56c9d94acc1b64bf2472d65ab81174 2024-11-27T16:20:57,428 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:20:57,428 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for fb56c9d94acc1b64bf2472d65ab81174 2024-11-27T16:20:57,429 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for fb56c9d94acc1b64bf2472d65ab81174 2024-11-27T16:20:57,431 INFO [StoreOpener-fb56c9d94acc1b64bf2472d65ab81174-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region fb56c9d94acc1b64bf2472d65ab81174 2024-11-27T16:20:57,433 INFO [StoreOpener-fb56c9d94acc1b64bf2472d65ab81174-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fb56c9d94acc1b64bf2472d65ab81174 columnFamilyName info 2024-11-27T16:20:57,434 DEBUG [StoreOpener-fb56c9d94acc1b64bf2472d65ab81174-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:57,434 INFO [StoreOpener-fb56c9d94acc1b64bf2472d65ab81174-1 {}] regionserver.HStore(327): Store=fb56c9d94acc1b64bf2472d65ab81174/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:20:57,436 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/namespace/fb56c9d94acc1b64bf2472d65ab81174 2024-11-27T16:20:57,437 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/namespace/fb56c9d94acc1b64bf2472d65ab81174 2024-11-27T16:20:57,440 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for fb56c9d94acc1b64bf2472d65ab81174 2024-11-27T16:20:57,443 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/namespace/fb56c9d94acc1b64bf2472d65ab81174/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T16:20:57,444 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened fb56c9d94acc1b64bf2472d65ab81174; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63751287, jitterRate=-0.05003179609775543}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-27T16:20:57,446 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for fb56c9d94acc1b64bf2472d65ab81174: 2024-11-27T16:20:57,448 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174., pid=6, masterSystemTime=1732724457421 2024-11-27T16:20:57,451 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174. 2024-11-27T16:20:57,451 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174. 
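(Editor's note, not part of the log: the single hbase:namespace region fb56c9d94acc1b64bf2472d65ab81174 is now open on 7b191dec6496,44169 with openSeqNum=2, and the master is about to record that location in hbase:meta. The placement can be read back from a client with a RegionLocator; the sketch below is illustrative only, the names are mine, and it assumes an already-open Connection such as the one in the previous sketch.)

    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class NamespaceRegionLocations {                  // hypothetical helper
      // Print where each region of hbase:namespace is deployed.
      static void print(Connection conn) throws Exception {
        try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:namespace"))) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            // The encoded name corresponds to the fb56c9d94acc1b64bf2472d65ab81174 identifier above.
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }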
2024-11-27T16:20:57,452 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=fb56c9d94acc1b64bf2472d65ab81174, regionState=OPEN, openSeqNum=2, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:20:57,459 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-27T16:20:57,460 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure fb56c9d94acc1b64bf2472d65ab81174, server=7b191dec6496,44169,1732724452967 in 189 msec 2024-11-27T16:20:57,463 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-27T16:20:57,463 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=fb56c9d94acc1b64bf2472d65ab81174, ASSIGN in 352 msec 2024-11-27T16:20:57,464 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T16:20:57,464 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724457464"}]},"ts":"1732724457464"} 2024-11-27T16:20:57,467 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-27T16:20:57,470 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T16:20:57,473 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2520 sec 2024-11-27T16:20:57,529 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-27T16:20:57,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-27T16:20:57,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:20:57,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:20:57,562 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-27T16:20:57,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-27T16:20:57,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 23 msec 2024-11-27T16:20:57,586 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-27T16:20:57,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-27T16:20:57,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 13 msec 2024-11-27T16:20:57,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-27T16:20:57,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-27T16:20:57,614 INFO [master/7b191dec6496:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.556sec 2024-11-27T16:20:57,616 INFO [master/7b191dec6496:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-27T16:20:57,617 INFO [master/7b191dec6496:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-27T16:20:57,618 INFO [master/7b191dec6496:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-27T16:20:57,618 INFO [master/7b191dec6496:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-27T16:20:57,618 INFO [master/7b191dec6496:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-27T16:20:57,619 INFO [master/7b191dec6496:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b191dec6496,41377,1732724452229-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-27T16:20:57,620 INFO [master/7b191dec6496:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b191dec6496,41377,1732724452229-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-27T16:20:57,627 DEBUG [master/7b191dec6496:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-27T16:20:57,628 INFO [master/7b191dec6496:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-27T16:20:57,628 INFO [master/7b191dec6496:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b191dec6496,41377,1732724452229-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
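(Editor's note, not part of the log: the two CreateNamespaceProcedure runs above leave the freshly initialized cluster with its two built-in namespaces, default and hbase, after which the master reports initialization complete. A client can list them through Admin#listNamespaceDescriptors; the sketch below is illustrative only and the class name is mine.)

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespaces {                            // hypothetical name
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Expected output for a cluster in the state logged above: "default" and "hbase".
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
        }
      }
    }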
2024-11-27T16:20:57,631 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0870ca2a to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63dfbe60 2024-11-27T16:20:57,631 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-27T16:20:57,638 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@430e71de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:20:57,641 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-27T16:20:57,641 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-27T16:20:57,651 DEBUG [hconnection-0x6449c4c6-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:20:57,660 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41076, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:20:57,669 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=7b191dec6496,41377,1732724452229 2024-11-27T16:20:57,686 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=235, ProcessCount=11, AvailableMemoryMB=5086 2024-11-27T16:20:57,715 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T16:20:57,719 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45116, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T16:20:57,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
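(Editor's note, not part of the log: the TableDescriptorChecker warning above fires because the memstore flush size is 131072 bytes, far below the default, which is exactly what an ACID-guarantees test wants in order to force frequent flushes. The test's own setup code is not shown in this log; the sketch below shows one plausible way such a value could be set before starting a mini cluster with HBaseTestingUtility, which the log itself uses. Class names are mine.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterWithSmallFlushes {               // hypothetical name
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        Configuration conf = util.getConfiguration();
        // 131072 bytes (128 KB) causes very frequent memstore flushes,
        // which is what TableDescriptorChecker warns about in the line above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        util.startMiniCluster(1);        // one master, one region server, as in this log
        try {
          // ... run the workload under test ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }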
2024-11-27T16:20:57,733 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T16:20:57,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-27T16:20:57,738 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T16:20:57,739 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:57,741 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T16:20:57,741 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-27T16:20:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-27T16:20:57,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741837_1013 (size=960) 2024-11-27T16:20:57,762 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59 2024-11-27T16:20:57,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741838_1014 (size=53) 2024-11-27T16:20:57,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-27T16:20:58,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-27T16:20:58,176 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:20:58,177 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d498a187112eb3635082ffac2dfb4cf9, disabling compactions & flushes 2024-11-27T16:20:58,177 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:58,177 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:58,177 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. after waiting 0 ms 2024-11-27T16:20:58,177 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:58,177 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:58,177 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:20:58,180 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T16:20:58,180 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732724458180"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732724458180"}]},"ts":"1732724458180"} 2024-11-27T16:20:58,183 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
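(Editor's note, not part of the log: the create request above carries three column families A, B and C with VERSIONS => '1', 64 KB blocks, and the table attribute hbase.hregion.compacting.memstore.type => 'BASIC'. A roughly equivalent client-side creation with the 2.x builder API would look like the sketch below; this is illustrative only, not the test's own code, and the class name is mine.)

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTable {                           // hypothetical name
      public static void main(String[] args) throws Exception {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table attribute seen in the descriptor above: BASIC in-memory compaction.
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
        for (String family : new String[] { "A", "B", "C" }) {
          builder.setColumnFamily(
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                  .setMaxVersions(1)            // VERSIONS => '1'
                  .setBlocksize(64 * 1024)      // BLOCKSIZE => '65536 B (64KB)'
                  .build());
        }
        TableDescriptor desc = builder.build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(desc);              // blocks until the CreateTableProcedure finishes
        }
      }
    }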
2024-11-27T16:20:58,185 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T16:20:58,185 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724458185"}]},"ts":"1732724458185"} 2024-11-27T16:20:58,188 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-27T16:20:58,193 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d498a187112eb3635082ffac2dfb4cf9, ASSIGN}] 2024-11-27T16:20:58,195 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d498a187112eb3635082ffac2dfb4cf9, ASSIGN 2024-11-27T16:20:58,196 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d498a187112eb3635082ffac2dfb4cf9, ASSIGN; state=OFFLINE, location=7b191dec6496,44169,1732724452967; forceNewPlan=false, retain=false 2024-11-27T16:20:58,347 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=d498a187112eb3635082ffac2dfb4cf9, regionState=OPENING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:20:58,351 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:20:58,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-27T16:20:58,504 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:20:58,512 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
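(Editor's note, not part of the log: the repeated "Checking to see if procedure is done pid=9" lines are the client side of the blocking create call polling the master while the ASSIGN subprocedure above places the new region. In a test, the same wait is normally expressed against the public API; a minimal sketch under the assumption that the HBaseTestingUtility instance from the earlier sketch is available. Names are mine.)

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class WaitForAcidTable {                          // hypothetical helper
      // Block until every region of the new table is open and serving requests.
      static void waitForTable(HBaseTestingUtility util) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        util.waitTableAvailable(tn);                         // polls until the regions are OPEN
        try (Admin admin = util.getConnection().getAdmin()) {
          System.out.println("available = " + admin.isTableAvailable(tn));
        }
      }
    }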
2024-11-27T16:20:58,512 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} 2024-11-27T16:20:58,512 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:20:58,512 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:20:58,513 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:20:58,513 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:20:58,515 INFO [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:20:58,518 INFO [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:20:58,518 INFO [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d498a187112eb3635082ffac2dfb4cf9 columnFamilyName A 2024-11-27T16:20:58,518 DEBUG [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:58,519 INFO [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] regionserver.HStore(327): Store=d498a187112eb3635082ffac2dfb4cf9/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:20:58,520 INFO [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:20:58,521 INFO [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:20:58,522 INFO [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d498a187112eb3635082ffac2dfb4cf9 columnFamilyName B 2024-11-27T16:20:58,522 DEBUG [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:58,522 INFO [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] regionserver.HStore(327): Store=d498a187112eb3635082ffac2dfb4cf9/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:20:58,522 INFO [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:20:58,524 INFO [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:20:58,524 INFO [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d498a187112eb3635082ffac2dfb4cf9 columnFamilyName C 2024-11-27T16:20:58,525 DEBUG [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:20:58,525 INFO [StoreOpener-d498a187112eb3635082ffac2dfb4cf9-1 {}] regionserver.HStore(327): Store=d498a187112eb3635082ffac2dfb4cf9/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:20:58,526 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:58,527 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:20:58,527 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:20:58,530 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T16:20:58,532 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:20:58,535 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T16:20:58,536 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened d498a187112eb3635082ffac2dfb4cf9; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73719314, jitterRate=0.09850338101387024}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T16:20:58,537 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:20:58,538 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., pid=11, masterSystemTime=1732724458504 2024-11-27T16:20:58,541 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:58,541 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
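(Editor's note, not part of the log: each of the three stores comes up with a CompactingMemStore and the BASIC in-memory compactor, inherited here from the table-level hbase.hregion.compacting.memstore.type attribute shown in the create request. The same policy can also be requested per column family; the sketch below shows that alternative for illustration only, it is not what this test does, and the class name is mine.)

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BasicCompactingFamily {                     // hypothetical name
      public static void main(String[] args) {
        // Per-family equivalent of the BASIC compacting memstore seen in the store lines above.
        ColumnFamilyDescriptor cf =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .setMaxVersions(1)
                .build();
        // The descriptor can then be passed to TableDescriptorBuilder#setColumnFamily as in the
        // earlier creation sketch.
        System.out.println(cf);
      }
    }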
2024-11-27T16:20:58,542 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=d498a187112eb3635082ffac2dfb4cf9, regionState=OPEN, openSeqNum=2, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:20:58,549 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-27T16:20:58,549 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 in 194 msec 2024-11-27T16:20:58,553 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-27T16:20:58,553 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d498a187112eb3635082ffac2dfb4cf9, ASSIGN in 356 msec 2024-11-27T16:20:58,554 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T16:20:58,554 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724458554"}]},"ts":"1732724458554"} 2024-11-27T16:20:58,557 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-27T16:20:58,561 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T16:20:58,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 828 msec 2024-11-27T16:20:58,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-27T16:20:58,864 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-27T16:20:58,868 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x02a08c5a to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6be4168e 2024-11-27T16:20:58,872 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ed9b166, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:20:58,875 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:20:58,877 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41084, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:20:58,880 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T16:20:58,882 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45126, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T16:20:58,891 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x24512372 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5038857 2024-11-27T16:20:58,895 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27c80704, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:20:58,897 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53623ce6 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4aba57ed 2024-11-27T16:20:58,900 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f68aae6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:20:58,901 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x66d523ff to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@629b91f8 2024-11-27T16:20:58,903 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18de28d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:20:58,905 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c9b2c1d to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62b16227 2024-11-27T16:20:58,909 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cb8ce8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:20:58,911 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d888e3e to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53f30e40 2024-11-27T16:20:58,914 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7915562a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:20:58,916 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4f34c0b8 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@691cbc80 2024-11-27T16:20:58,922 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@502730d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:20:58,924 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6ebb9f30 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62cfc6db 2024-11-27T16:20:58,927 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b8793a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:20:58,930 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f18a09d to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8b52656 2024-11-27T16:20:58,933 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71209fad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:20:58,935 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x426bcd11 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@634dc49c 2024-11-27T16:20:58,938 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1665e2af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:20:58,946 DEBUG [hconnection-0x48af454-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:20:58,947 DEBUG [hconnection-0xb857c1a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:20:58,947 DEBUG [hconnection-0x5d4164ec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:20:58,947 DEBUG [hconnection-0x40eb5aeb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:20:58,948 DEBUG [hconnection-0x59c3390f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:20:58,949 DEBUG [hconnection-0x17b3816e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:20:58,949 DEBUG [hconnection-0x23bc3a57-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:20:58,951 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:20:58,955 DEBUG [hconnection-0x4a22ee9a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:20:58,956 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-27T16:20:58,957 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41100, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:20:58,958 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41102, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:20:58,958 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41106, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:20:58,959 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:20:58,960 DEBUG [hconnection-0x34e8847a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:20:58,961 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:20:58,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-27T16:20:58,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:20:58,965 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41120, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:20:58,966 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41130, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:20:58,971 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41140, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:20:58,973 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41154, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:20:58,977 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41164, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:20:58,981 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41170, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:20:59,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:20:59,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:20:59,063 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-27T16:20:59,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:20:59,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:20:59,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:20:59,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:20:59,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:20:59,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:20:59,125 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:20:59,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:59,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:20:59,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:59,145 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:20:59,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:20:59,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:20:59,199 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/4ad0db3684374730be5ea5fbdfe679a1 is 50, key is test_row_0/A:col10/1732724458967/Put/seqid=0 2024-11-27T16:20:59,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724519212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724519216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724519223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724519226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724519228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741839_1015 (size=12001) 2024-11-27T16:20:59,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/4ad0db3684374730be5ea5fbdfe679a1 2024-11-27T16:20:59,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-27T16:20:59,311 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,312 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:20:59,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:59,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:20:59,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:59,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:20:59,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:20:59,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:20:59,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724519336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724519337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724519342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724519344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724519344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/12c0e6483cf94472ae4f102788838dde is 50, key is test_row_0/B:col10/1732724458967/Put/seqid=0 2024-11-27T16:20:59,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741840_1016 (size=12001) 2024-11-27T16:20:59,377 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/12c0e6483cf94472ae4f102788838dde 2024-11-27T16:20:59,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/d6b3a5cc797c4096ad6879ded24a04e5 is 50, key is test_row_0/C:col10/1732724458967/Put/seqid=0 2024-11-27T16:20:59,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741841_1017 (size=12001) 2024-11-27T16:20:59,449 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/d6b3a5cc797c4096ad6879ded24a04e5 2024-11-27T16:20:59,463 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/4ad0db3684374730be5ea5fbdfe679a1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/4ad0db3684374730be5ea5fbdfe679a1 2024-11-27T16:20:59,471 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,472 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:20:59,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/4ad0db3684374730be5ea5fbdfe679a1, entries=150, sequenceid=15, filesize=11.7 K 2024-11-27T16:20:59,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:59,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:20:59,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:59,483 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:20:59,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:20:59,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:20:59,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/12c0e6483cf94472ae4f102788838dde as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/12c0e6483cf94472ae4f102788838dde 2024-11-27T16:20:59,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/12c0e6483cf94472ae4f102788838dde, entries=150, sequenceid=15, filesize=11.7 K 2024-11-27T16:20:59,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/d6b3a5cc797c4096ad6879ded24a04e5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/d6b3a5cc797c4096ad6879ded24a04e5 2024-11-27T16:20:59,521 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/d6b3a5cc797c4096ad6879ded24a04e5, entries=150, sequenceid=15, filesize=11.7 K 2024-11-27T16:20:59,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for d498a187112eb3635082ffac2dfb4cf9 in 468ms, sequenceid=15, compaction requested=false 2024-11-27T16:20:59,525 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-27T16:20:59,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:20:59,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T16:20:59,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:20:59,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:20:59,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:20:59,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:20:59,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:20:59,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:20:59,551 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:20:59,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/712c1f3117ab46179edabc79dacbd358 is 50, key is test_row_0/A:col10/1732724459201/Put/seqid=0 2024-11-27T16:20:59,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-27T16:20:59,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724519583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724519591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724519591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741842_1018 (size=14341) 2024-11-27T16:20:59,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724519597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724519601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,637 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:20:59,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:59,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:20:59,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:59,639 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
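[editor's note] The repeated RegionTooBusyException entries in this stretch of the log come from HRegion.checkResources rejecting Mutate calls while the region's memstore is over its blocking limit. The stock HBase client normally retries this exception internally; the sketch below only makes that handling explicit for illustration. The table, row, family, and qualifier names are taken from the log; the retry count and backoff values are illustrative assumptions, not what TestAcidGuarantees actually does.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retry a handful of times with a growing pause if the region server
      // rejects the write because the region's memstore is over its blocking limit.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException busy) {
          if (attempt >= 5) {
            throw busy;                  // give up after five attempts (illustrative)
          }
          Thread.sleep(100L * attempt);  // simple linear backoff (illustrative)
        }
      }
    }
  }
}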
2024-11-27T16:20:59,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:20:59,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:20:59,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724519707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724519708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724519710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724519714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724519715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,792 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,793 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:20:59,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:59,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:20:59,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:59,794 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
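[editor's note] The "Over memstore limit=512.0 K" figure in these RegionTooBusyException messages is the per-region flush threshold multiplied by the block multiplier; the test apparently tunes the threshold far below the 128 MB default so the limit is hit after only a few puts. A minimal sketch of the two standard knobs involved, assuming these are what the test overrides (the 128 K value below is only an illustration that happens to reproduce 512 K with the default multiplier of 4):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold (default 128 MB); a tiny value forces frequent flushes.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Writes are rejected with RegionTooBusyException once the memstore grows past
    // flush.size * block.multiplier (the default multiplier is 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = " + (blockingLimit / 1024) + " K"); // prints 512 K
  }
}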
2024-11-27T16:20:59,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:20:59,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:20:59,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724519922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724519922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724519922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724519934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:20:59,949 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724519934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:20:59,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:20:59,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:59,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:20:59,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:20:59,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
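[editor's note] The pid=13 FlushRegionCallable that keeps failing here is the region-server side of a master-dispatched flush procedure: it bails out with "Unable to complete flush" whenever the region reports it is already flushing (the memstore-pressure flush seen elsewhere in the log), and the master re-dispatches it via RSProcedureDispatcher. A table-level flush of this kind can be requested from a client roughly as in the sketch below (connection setup omitted; the table name is taken from the log, the rest is illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class FlushRequestExample {
  // Asks the master to flush the table; in this run the master then dispatches
  // FlushRegionCallable (pid=13 above) to the region server hosting the region.
  static void flushTable(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}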
2024-11-27T16:20:59,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:20:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:00,008 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/712c1f3117ab46179edabc79dacbd358 2024-11-27T16:21:00,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/d0c4de35809c44a484ab80f9d73bf3ae is 50, key is test_row_0/B:col10/1732724459201/Put/seqid=0 2024-11-27T16:21:00,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741843_1019 (size=12001) 2024-11-27T16:21:00,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/d0c4de35809c44a484ab80f9d73bf3ae 2024-11-27T16:21:00,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-27T16:21:00,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/f62c916a86e44395a691a5aa77f3d863 is 50, key is test_row_0/C:col10/1732724459201/Put/seqid=0 2024-11-27T16:21:00,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741844_1020 (size=12001) 2024-11-27T16:21:00,105 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:21:00,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:00,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:00,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:00,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:00,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:00,108 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/f62c916a86e44395a691a5aa77f3d863 2024-11-27T16:21:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
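[editor's note] The HFileWriterImpl lines above show the shape of the cells being flushed: row test_row_0, families A/B/C, qualifier col10. For reference, reading one of those cells back through the client API would look roughly like this sketch (connection setup and error handling omitted; names taken from the log):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadBackExample {
  static byte[] readCell(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Get get = new Get(Bytes.toBytes("test_row_0"));
      get.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"));
      Result result = table.get(get);
      // Returns the latest version of B:col10, whether it is still in the
      // memstore or already sitting in a flushed HFile.
      return result.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
    }
  }
}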
2024-11-27T16:21:00,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/712c1f3117ab46179edabc79dacbd358 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/712c1f3117ab46179edabc79dacbd358 2024-11-27T16:21:00,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/712c1f3117ab46179edabc79dacbd358, entries=200, sequenceid=39, filesize=14.0 K 2024-11-27T16:21:00,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/d0c4de35809c44a484ab80f9d73bf3ae as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d0c4de35809c44a484ab80f9d73bf3ae 2024-11-27T16:21:00,155 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d0c4de35809c44a484ab80f9d73bf3ae, entries=150, sequenceid=39, filesize=11.7 K 2024-11-27T16:21:00,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/f62c916a86e44395a691a5aa77f3d863 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f62c916a86e44395a691a5aa77f3d863 2024-11-27T16:21:00,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f62c916a86e44395a691a5aa77f3d863, entries=150, sequenceid=39, filesize=11.7 K 2024-11-27T16:21:00,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d498a187112eb3635082ffac2dfb4cf9 in 630ms, sequenceid=39, compaction requested=false 2024-11-27T16:21:00,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:00,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:00,260 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T16:21:00,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:00,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:00,261 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:00,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:00,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:00,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:00,261 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:21:00,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:00,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:00,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:00,264 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:00,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:00,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
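[editor's note] The "FLUSHING TO DISK ... store=A/B/C" and "Swapping pipeline suffix" lines come from CompactingMemStore, i.e. in-memory compaction is enabled for these column families. A sketch of how that is typically switched on at table-creation time, assuming the usual HBase 2.x descriptor-builder API (policy choice and method usage are illustrative, not taken from this test's setup code):

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionExample {
  static void createTable(Admin admin) throws Exception {
    TableDescriptorBuilder tableBuilder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    for (String family : new String[] {"A", "B", "C"}) {
      tableBuilder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
          // BASIC in-memory compaction keeps a pipeline of immutable segments,
          // which is what the CompactionPipeline "Swapping pipeline suffix"
          // messages above refer to when a flush drains that pipeline.
          .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
          .build());
    }
    admin.createTable(tableBuilder.build());
  }
}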
2024-11-27T16:21:00,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/619602cf0e7c4a50991dd3b94d0515e6 is 50, key is test_row_0/A:col10/1732724459556/Put/seqid=0 2024-11-27T16:21:00,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741845_1021 (size=16681) 2024-11-27T16:21:00,321 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/619602cf0e7c4a50991dd3b94d0515e6 2024-11-27T16:21:00,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/49a95b0a4f3a45e9a54e335e4fd1e317 is 50, key is test_row_0/B:col10/1732724459556/Put/seqid=0 2024-11-27T16:21:00,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724520342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724520345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724520347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724520353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724520363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741846_1022 (size=12001) 2024-11-27T16:21:00,418 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:21:00,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:00,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:00,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:00,420 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:00,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:00,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:00,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724520466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724520473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724520477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724520479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724520478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,574 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:21:00,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:00,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:00,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:00,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:00,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:00,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:00,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724520673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724520685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724520689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724520691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:00,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724520692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,732 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:21:00,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:00,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:00,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:00,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:00,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:00,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:00,796 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/49a95b0a4f3a45e9a54e335e4fd1e317 2024-11-27T16:21:00,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/36a06d5a83e94c1ea7bb5c7cd76c0c57 is 50, key is test_row_0/C:col10/1732724459556/Put/seqid=0 2024-11-27T16:21:00,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741847_1023 (size=12001) 2024-11-27T16:21:00,852 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/36a06d5a83e94c1ea7bb5c7cd76c0c57 2024-11-27T16:21:00,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/619602cf0e7c4a50991dd3b94d0515e6 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/619602cf0e7c4a50991dd3b94d0515e6 2024-11-27T16:21:00,879 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/619602cf0e7c4a50991dd3b94d0515e6, entries=250, sequenceid=53, filesize=16.3 K 2024-11-27T16:21:00,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/49a95b0a4f3a45e9a54e335e4fd1e317 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/49a95b0a4f3a45e9a54e335e4fd1e317 2024-11-27T16:21:00,889 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:00,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:21:00,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:00,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:00,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:00,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:00,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:00,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:00,900 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/49a95b0a4f3a45e9a54e335e4fd1e317, entries=150, sequenceid=53, filesize=11.7 K 2024-11-27T16:21:00,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/36a06d5a83e94c1ea7bb5c7cd76c0c57 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/36a06d5a83e94c1ea7bb5c7cd76c0c57 2024-11-27T16:21:00,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/36a06d5a83e94c1ea7bb5c7cd76c0c57, entries=150, sequenceid=53, filesize=11.7 K 2024-11-27T16:21:00,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d498a187112eb3635082ffac2dfb4cf9 in 657ms, sequenceid=53, compaction requested=true 2024-11-27T16:21:00,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:00,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:00,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:00,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:00,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:00,920 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:00,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:00,920 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:00,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:00,929 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:00,931 DEBUG 
[RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43023 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:00,931 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/B is initiating minor compaction (all files) 2024-11-27T16:21:00,931 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/A is initiating minor compaction (all files) 2024-11-27T16:21:00,931 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/A in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:00,931 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/B in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:00,932 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/4ad0db3684374730be5ea5fbdfe679a1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/712c1f3117ab46179edabc79dacbd358, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/619602cf0e7c4a50991dd3b94d0515e6] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=42.0 K 2024-11-27T16:21:00,932 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/12c0e6483cf94472ae4f102788838dde, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d0c4de35809c44a484ab80f9d73bf3ae, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/49a95b0a4f3a45e9a54e335e4fd1e317] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=35.2 K 2024-11-27T16:21:00,933 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 12c0e6483cf94472ae4f102788838dde, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732724458967 2024-11-27T16:21:00,934 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ad0db3684374730be5ea5fbdfe679a1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732724458967 2024-11-27T16:21:00,935 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 712c1f3117ab46179edabc79dacbd358, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=39, 
earliestPutTs=1732724459201 2024-11-27T16:21:00,935 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d0c4de35809c44a484ab80f9d73bf3ae, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732724459201 2024-11-27T16:21:00,936 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 619602cf0e7c4a50991dd3b94d0515e6, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732724459556 2024-11-27T16:21:00,937 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 49a95b0a4f3a45e9a54e335e4fd1e317, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732724459556 2024-11-27T16:21:00,990 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#B#compaction#9 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:00,991 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/f4638405c4244a5fa4e71fc6974b30d0 is 50, key is test_row_0/B:col10/1732724459556/Put/seqid=0 2024-11-27T16:21:01,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:01,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T16:21:01,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:01,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:01,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:01,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:01,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:01,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:01,006 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#A#compaction#10 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:01,007 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/e8c8fe1362c24e8aaf1e7ec1a25bcb1a is 50, key is test_row_0/A:col10/1732724459556/Put/seqid=0 2024-11-27T16:21:01,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741848_1024 (size=12104) 2024-11-27T16:21:01,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724521024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724521026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724521028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724521035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724521031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,045 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,046 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:21:01,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:01,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:01,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:01,046 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
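The RegionTooBusyException warnings above are raised by HRegion.checkResources() once the region's memstore passes its blocking limit; the HBase client normally absorbs these with its own retry policy, so the sketch below is only a minimal hand-rolled illustration of the same backoff-and-retry pattern against the TestAcidGuarantees table. It assumes the standard HBase 2.x client API with hbase-client on the classpath; the table name, row key and column layout mirror the log, while the attempt count and backoff values are invented for the example.

    // Illustrative only: bounded manual retry around Table.put() for the
    // RegionTooBusyException seen above. Not the test's own code.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                long backoffMs = 100;                 // assumed starting backoff
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);               // may fail while the memstore is blocked
                        return;                       // write accepted
                    } catch (RegionTooBusyException busy) {
                        // Region is above its blocking memstore size; wait for the
                        // in-flight flush to drain it, then try again. Depending on
                        // client retry settings this can also arrive wrapped in a
                        // RetriesExhaustedException instead.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;               // exponential backoff
                    }
                }
                throw new IllegalStateException("region stayed busy after 5 attempts");
            }
        }
    }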
2024-11-27T16:21:01,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:01,057 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/5f59276818374ce68a5bebb41171ea9b is 50, key is test_row_0/A:col10/1732724461001/Put/seqid=0 2024-11-27T16:21:01,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741849_1025 (size=12104) 2024-11-27T16:21:01,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-27T16:21:01,084 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/e8c8fe1362c24e8aaf1e7ec1a25bcb1a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e8c8fe1362c24e8aaf1e7ec1a25bcb1a 2024-11-27T16:21:01,106 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/A of d498a187112eb3635082ffac2dfb4cf9 into e8c8fe1362c24e8aaf1e7ec1a25bcb1a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
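The 512.0 K figure in the "Over memstore limit" warnings is the region's blocking memstore size, which HRegion derives as the configured flush size multiplied by hbase.hregion.memstore.block.multiplier; once the memstore grows past that product, checkResources() rejects mutations until the in-flight flush (the .tmp/A/5f59276818374ce68a5bebb41171ea9b file being written above) drains it. A minimal configuration sketch follows; 128 KB x 4 = 512 KB is one combination consistent with the limit in this log, but the values actually configured by the test are an assumption here.

    // Sketch of the two knobs behind the 512 K blocking limit; the concrete
    // numbers are assumptions chosen to reproduce the figure in the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // flush at 128 KB (assumed)
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block writes at 4x the flush size
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("writes blocked above " + blockingLimit + " bytes"); // 524288 = 512 K
        }
    }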
2024-11-27T16:21:01,107 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:01,107 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/A, priority=13, startTime=1732724460919; duration=0sec 2024-11-27T16:21:01,107 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:01,107 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:A 2024-11-27T16:21:01,107 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:01,111 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:01,111 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/C is initiating minor compaction (all files) 2024-11-27T16:21:01,111 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/C in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:01,112 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/d6b3a5cc797c4096ad6879ded24a04e5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f62c916a86e44395a691a5aa77f3d863, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/36a06d5a83e94c1ea7bb5c7cd76c0c57] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=35.2 K 2024-11-27T16:21:01,113 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6b3a5cc797c4096ad6879ded24a04e5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732724458967 2024-11-27T16:21:01,116 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f62c916a86e44395a691a5aa77f3d863, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732724459201 2024-11-27T16:21:01,119 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36a06d5a83e94c1ea7bb5c7cd76c0c57, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732724459556 2024-11-27T16:21:01,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46329 is added to blk_1073741850_1026 (size=16681) 2024-11-27T16:21:01,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/5f59276818374ce68a5bebb41171ea9b 2024-11-27T16:21:01,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724521136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/c33b6a2f1e4840f394d5f11471ac8a8e is 50, key is test_row_0/B:col10/1732724461001/Put/seqid=0 2024-11-27T16:21:01,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724521137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724521137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724521138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724521143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,154 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#C#compaction#13 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:01,155 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/e21547f1576a426097b2ea981de51e60 is 50, key is test_row_0/C:col10/1732724459556/Put/seqid=0 2024-11-27T16:21:01,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741851_1027 (size=12001) 2024-11-27T16:21:01,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/c33b6a2f1e4840f394d5f11471ac8a8e 2024-11-27T16:21:01,200 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,201 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:21:01,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:01,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:01,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:01,201 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
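The "Exploring compaction algorithm has selected 3 files of size 36003 ... with 1 in ratio" line above reflects the ratio test a candidate set of store files must pass: no single file may be larger than the configured ratio times the combined size of the others. The sketch below is a simplified stand-in for that check, not the real ExploringCompactionPolicy; 1.2 is the usual default for hbase.hstore.compaction.ratio, and the three ~12001-byte sizes match the 36003-byte C-family selection in the log.

    // Simplified "files in ratio" check, illustrating the selection rule only.
    import java.util.List;

    public class CompactionRatioCheck {
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                // Reject the candidate set if one file dwarfs the rest.
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Three similar-sized C-family files totalling 36003 bytes, as in the log.
            List<Long> sizes = List.of(12001L, 12001L, 12001L);
            System.out.println(filesInRatio(sizes, 1.2));   // true -> eligible for compaction
        }
    }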
2024-11-27T16:21:01,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:01,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:01,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/0dd56f2ddbe54ca8b5597a08be3d354f is 50, key is test_row_0/C:col10/1732724461001/Put/seqid=0 2024-11-27T16:21:01,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741852_1028 (size=12104) 2024-11-27T16:21:01,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741853_1029 (size=12001) 2024-11-27T16:21:01,251 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/0dd56f2ddbe54ca8b5597a08be3d354f 2024-11-27T16:21:01,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/5f59276818374ce68a5bebb41171ea9b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/5f59276818374ce68a5bebb41171ea9b 2024-11-27T16:21:01,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/5f59276818374ce68a5bebb41171ea9b, entries=250, sequenceid=77, filesize=16.3 K 2024-11-27T16:21:01,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/c33b6a2f1e4840f394d5f11471ac8a8e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c33b6a2f1e4840f394d5f11471ac8a8e 2024-11-27T16:21:01,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c33b6a2f1e4840f394d5f11471ac8a8e, entries=150, sequenceid=77, filesize=11.7 K 2024-11-27T16:21:01,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/0dd56f2ddbe54ca8b5597a08be3d354f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/0dd56f2ddbe54ca8b5597a08be3d354f 2024-11-27T16:21:01,308 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/0dd56f2ddbe54ca8b5597a08be3d354f, entries=150, sequenceid=77, filesize=11.7 K 2024-11-27T16:21:01,310 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d498a187112eb3635082ffac2dfb4cf9 in 306ms, sequenceid=77, compaction requested=false 2024-11-27T16:21:01,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:01,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:01,350 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-27T16:21:01,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:01,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:01,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:01,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:01,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:01,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:01,354 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:21:01,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:01,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:01,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:01,356 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:01,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:01,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:01,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/4aab2c78ca6046a79ff93d2ce172a3f2 is 50, key is test_row_0/A:col10/1732724461030/Put/seqid=0 2024-11-27T16:21:01,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741854_1030 (size=14341) 2024-11-27T16:21:01,408 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/4aab2c78ca6046a79ff93d2ce172a3f2 2024-11-27T16:21:01,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724521400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724521402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724521400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724521409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724521410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,427 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/ee8609ee64a2476da21463623559ccc7 is 50, key is test_row_0/B:col10/1732724461030/Put/seqid=0 2024-11-27T16:21:01,446 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/f4638405c4244a5fa4e71fc6974b30d0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f4638405c4244a5fa4e71fc6974b30d0 2024-11-27T16:21:01,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741855_1031 (size=12001) 2024-11-27T16:21:01,456 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/ee8609ee64a2476da21463623559ccc7 2024-11-27T16:21:01,462 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/B of d498a187112eb3635082ffac2dfb4cf9 into f4638405c4244a5fa4e71fc6974b30d0(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:01,463 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:01,464 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/B, priority=13, startTime=1732724460920; duration=0sec 2024-11-27T16:21:01,464 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:01,464 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:B 2024-11-27T16:21:01,478 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/5d367d07c29243b79ef715791d9bb333 is 50, key is test_row_0/C:col10/1732724461030/Put/seqid=0 2024-11-27T16:21:01,509 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,510 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:21:01,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:01,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:01,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:01,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:01,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:01,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:01,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741856_1032 (size=12001) 2024-11-27T16:21:01,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/5d367d07c29243b79ef715791d9bb333 2024-11-27T16:21:01,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724521517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724521518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724521518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724521519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724521520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/4aab2c78ca6046a79ff93d2ce172a3f2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/4aab2c78ca6046a79ff93d2ce172a3f2 2024-11-27T16:21:01,541 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/4aab2c78ca6046a79ff93d2ce172a3f2, entries=200, sequenceid=91, filesize=14.0 K 2024-11-27T16:21:01,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/ee8609ee64a2476da21463623559ccc7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/ee8609ee64a2476da21463623559ccc7 2024-11-27T16:21:01,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/ee8609ee64a2476da21463623559ccc7, entries=150, sequenceid=91, filesize=11.7 K 2024-11-27T16:21:01,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/5d367d07c29243b79ef715791d9bb333 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/5d367d07c29243b79ef715791d9bb333 2024-11-27T16:21:01,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/5d367d07c29243b79ef715791d9bb333, entries=150, sequenceid=91, filesize=11.7 K 2024-11-27T16:21:01,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d498a187112eb3635082ffac2dfb4cf9 in 217ms, sequenceid=91, compaction requested=true 2024-11-27T16:21:01,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:01,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:A, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:01,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:01,568 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:01,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:B, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:01,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:01,568 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:01,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:21:01,571 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-27T16:21:01,573 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43126 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:01,573 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/A is initiating minor compaction (all files) 2024-11-27T16:21:01,573 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/A in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:01,573 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e8c8fe1362c24e8aaf1e7ec1a25bcb1a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/5f59276818374ce68a5bebb41171ea9b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/4aab2c78ca6046a79ff93d2ce172a3f2] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=42.1 K 2024-11-27T16:21:01,574 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting e8c8fe1362c24e8aaf1e7ec1a25bcb1a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732724459556 2024-11-27T16:21:01,575 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f59276818374ce68a5bebb41171ea9b, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732724460342 2024-11-27T16:21:01,575 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4aab2c78ca6046a79ff93d2ce172a3f2, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732724461023 2024-11-27T16:21:01,609 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#A#compaction#18 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:01,610 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/b8efb0547edc40d09f082dfae177e954 is 50, key is test_row_0/A:col10/1732724461030/Put/seqid=0 2024-11-27T16:21:01,622 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/e21547f1576a426097b2ea981de51e60 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/e21547f1576a426097b2ea981de51e60 2024-11-27T16:21:01,635 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/C of d498a187112eb3635082ffac2dfb4cf9 into e21547f1576a426097b2ea981de51e60(size=11.8 K), total size for store is 35.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:01,635 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:01,635 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/C, priority=13, startTime=1732724460920; duration=0sec 2024-11-27T16:21:01,635 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:21:01,635 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:C 2024-11-27T16:21:01,635 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:01,638 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:01,638 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/B is initiating minor compaction (all files) 2024-11-27T16:21:01,639 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/B in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:01,639 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f4638405c4244a5fa4e71fc6974b30d0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c33b6a2f1e4840f394d5f11471ac8a8e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/ee8609ee64a2476da21463623559ccc7] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=35.3 K 2024-11-27T16:21:01,640 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4638405c4244a5fa4e71fc6974b30d0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732724459556 2024-11-27T16:21:01,641 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c33b6a2f1e4840f394d5f11471ac8a8e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732724460349 2024-11-27T16:21:01,642 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee8609ee64a2476da21463623559ccc7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732724461023 2024-11-27T16:21:01,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741857_1033 (size=12207) 2024-11-27T16:21:01,665 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,666 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T16:21:01,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:01,666 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T16:21:01,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:01,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:01,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:01,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:01,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:01,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:01,669 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/b8efb0547edc40d09f082dfae177e954 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b8efb0547edc40d09f082dfae177e954 2024-11-27T16:21:01,675 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-27T16:21:01,678 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#B#compaction#19 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:01,680 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-27T16:21:01,681 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/3154ac50155b493c9e1a293bf3943ec3 is 50, key is test_row_0/B:col10/1732724461030/Put/seqid=0 2024-11-27T16:21:01,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/1108887f283a4f4d91a7c954ed7db869 is 50, key is test_row_0/A:col10/1732724461405/Put/seqid=0 2024-11-27T16:21:01,687 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/A of d498a187112eb3635082ffac2dfb4cf9 into b8efb0547edc40d09f082dfae177e954(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:01,687 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:01,687 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/A, priority=13, startTime=1732724461568; duration=0sec 2024-11-27T16:21:01,687 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:01,687 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:A 2024-11-27T16:21:01,688 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:01,690 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:01,690 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/C is initiating minor compaction (all files) 2024-11-27T16:21:01,690 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/C in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:01,691 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/e21547f1576a426097b2ea981de51e60, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/0dd56f2ddbe54ca8b5597a08be3d354f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/5d367d07c29243b79ef715791d9bb333] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=35.3 K 2024-11-27T16:21:01,692 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting e21547f1576a426097b2ea981de51e60, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732724459556 2024-11-27T16:21:01,693 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0dd56f2ddbe54ca8b5597a08be3d354f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732724460349 2024-11-27T16:21:01,693 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d367d07c29243b79ef715791d9bb333, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732724461023 2024-11-27T16:21:01,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741858_1034 (size=12207) 2024-11-27T16:21:01,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741859_1035 (size=12001) 2024-11-27T16:21:01,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:01,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:01,744 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#C#compaction#21 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:01,745 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/172fbd44c9d94147b760377a210720a4 is 50, key is test_row_0/C:col10/1732724461030/Put/seqid=0 2024-11-27T16:21:01,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724521746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724521751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724521751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724521756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724521759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741860_1036 (size=12207) 2024-11-27T16:21:01,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724521864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724521864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724521865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724521865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:01,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:01,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724521866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724522073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724522073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724522074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724522074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724522074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,137 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/1108887f283a4f4d91a7c954ed7db869 2024-11-27T16:21:02,156 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/3154ac50155b493c9e1a293bf3943ec3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/3154ac50155b493c9e1a293bf3943ec3 2024-11-27T16:21:02,172 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/B of d498a187112eb3635082ffac2dfb4cf9 into 3154ac50155b493c9e1a293bf3943ec3(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:02,172 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9:
2024-11-27T16:21:02,172 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/B, priority=13, startTime=1732724461568; duration=0sec
2024-11-27T16:21:02,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/8f657cdacc484061880f654ef4b0754c is 50, key is test_row_0/B:col10/1732724461405/Put/seqid=0
2024-11-27T16:21:02,172 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T16:21:02,185 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:B
2024-11-27T16:21:02,192 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/172fbd44c9d94147b760377a210720a4 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/172fbd44c9d94147b760377a210720a4
2024-11-27T16:21:02,209 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/C of d498a187112eb3635082ffac2dfb4cf9 into 172fbd44c9d94147b760377a210720a4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T16:21:02,210 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9:
2024-11-27T16:21:02,210 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/C, priority=13, startTime=1732724461568; duration=0sec
2024-11-27T16:21:02,210 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T16:21:02,210 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:C
2024-11-27T16:21:02,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741861_1037 (size=12001)
2024-11-27T16:21:02,232 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/8f657cdacc484061880f654ef4b0754c
2024-11-27T16:21:02,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/f7c8558f74bf43f1bf704ffbebb7bfa2 is 50, key is test_row_0/C:col10/1732724461405/Put/seqid=0
2024-11-27T16:21:02,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741862_1038 (size=12001)
2024-11-27T16:21:02,292 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/f7c8558f74bf43f1bf704ffbebb7bfa2
2024-11-27T16:21:02,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/1108887f283a4f4d91a7c954ed7db869 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/1108887f283a4f4d91a7c954ed7db869
2024-11-27T16:21:02,329 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/1108887f283a4f4d91a7c954ed7db869, entries=150, sequenceid=116, filesize=11.7 K
2024-11-27T16:21:02,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/8f657cdacc484061880f654ef4b0754c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/8f657cdacc484061880f654ef4b0754c
2024-11-27T16:21:02,359 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/8f657cdacc484061880f654ef4b0754c, entries=150, sequenceid=116, filesize=11.7 K
2024-11-27T16:21:02,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/f7c8558f74bf43f1bf704ffbebb7bfa2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f7c8558f74bf43f1bf704ffbebb7bfa2
2024-11-27T16:21:02,379 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f7c8558f74bf43f1bf704ffbebb7bfa2, entries=150, sequenceid=116, filesize=11.7 K
2024-11-27T16:21:02,383 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d498a187112eb3635082ffac2dfb4cf9 in 716ms, sequenceid=116, compaction requested=false
2024-11-27T16:21:02,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9:
2024-11-27T16:21:02,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.
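The repeated RegionTooBusyException entries before and after this point show the region server rejecting writes because the region's memstore passed its blocking threshold (reported as 512.0 K, a deliberately small limit for this TestAcidGuarantees run); once the flush recorded above completes, writes are accepted again. The standard HBase client already retries this exception internally, governed by hbase.client.retries.number and hbase.client.pause, so applications rarely handle it by hand. As a rough caller-side illustration only, the same back-off idea might look like the sketch below; the table, row, and column names mirror the key pattern seen in this log (test_row_0/A:col10), the cell value and retry parameters are made up, and depending on the client version the exception may surface wrapped in a RetriesExhaustedException rather than directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row/family/qualifier follow the key pattern from this log; the value is arbitrary.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
            putWithBackoff(table, put);
        }
    }

    // Retry the put a few times when the region reports it is too busy, backing off so the
    // region server has time to finish its in-flight flush. The retry count and pauses are
    // illustrative; real deployments usually just tune hbase.client.retries.number and
    // hbase.client.pause instead of writing a loop like this.
    static void putWithBackoff(Table table, Put put) throws Exception {
        long pauseMs = 100L;
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return; // write accepted
            } catch (RegionTooBusyException busy) {
                if (attempt >= 5) {
                    throw busy; // give up after a handful of attempts
                }
                Thread.sleep(pauseMs);
                pauseMs *= 2; // exponential back-off
            }
        }
    }
}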
2024-11-27T16:21:02,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13
2024-11-27T16:21:02,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=13
2024-11-27T16:21:02,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9
2024-11-27T16:21:02,389 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB
2024-11-27T16:21:02,391 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12
2024-11-27T16:21:02,391 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.4240 sec
2024-11-27T16:21:02,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A
2024-11-27T16:21:02,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:02,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B
2024-11-27T16:21:02,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:02,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C
2024-11-27T16:21:02,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:02,395 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 3.4400 sec
2024-11-27T16:21:02,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/32069e77767e417489e0b765c7359f12 is 50, key is test_row_0/A:col10/1732724461755/Put/seqid=0
2024-11-27T16:21:02,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724522424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724522427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724522430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724522430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724522431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741863_1039 (size=12151) 2024-11-27T16:21:02,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724522532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724522536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724522537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724522539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724522539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,718 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-27T16:21:02,718 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-27T16:21:02,723 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-27T16:21:02,723 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-27T16:21:02,724 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-27T16:21:02,725 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-27T16:21:02,725 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-27T16:21:02,725 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-27T16:21:02,731 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the 
MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-27T16:21:02,731 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-27T16:21:02,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724522738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724522740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724522742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,748 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724522747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:02,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724522747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:02,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/32069e77767e417489e0b765c7359f12 2024-11-27T16:21:02,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/9bb481d48ecd4c39b4757dbebc134751 is 50, key is test_row_0/B:col10/1732724461755/Put/seqid=0 2024-11-27T16:21:02,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741864_1040 (size=12151) 2024-11-27T16:21:03,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:03,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724523045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:03,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724523049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:03,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724523051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:03,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:03,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724523054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724523054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-27T16:21:03,075 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-27T16:21:03,078 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:03,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-27T16:21:03,083 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:03,086 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:03,086 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:03,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-27T16:21:03,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-27T16:21:03,240 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T16:21:03,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:03,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:03,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:03,241 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:03,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:03,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:03,326 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/9bb481d48ecd4c39b4757dbebc134751 2024-11-27T16:21:03,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/07526932f8c142dc83aa26e8ea7fe2a3 is 50, key is test_row_0/C:col10/1732724461755/Put/seqid=0 2024-11-27T16:21:03,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741865_1041 (size=12151) 2024-11-27T16:21:03,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-27T16:21:03,395 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T16:21:03,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:03,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:03,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:03,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:03,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:03,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:03,550 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T16:21:03,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:03,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:03,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:03,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:03,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:03,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:03,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724523551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:03,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:03,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724523555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:03,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724523558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:03,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724523560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:03,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724523562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-27T16:21:03,706 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T16:21:03,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:03,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:03,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:03,709 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:03,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:03,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:03,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/07526932f8c142dc83aa26e8ea7fe2a3 2024-11-27T16:21:03,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/32069e77767e417489e0b765c7359f12 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/32069e77767e417489e0b765c7359f12 2024-11-27T16:21:03,804 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/32069e77767e417489e0b765c7359f12, entries=150, sequenceid=134, filesize=11.9 K 2024-11-27T16:21:03,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/9bb481d48ecd4c39b4757dbebc134751 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/9bb481d48ecd4c39b4757dbebc134751 2024-11-27T16:21:03,818 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/9bb481d48ecd4c39b4757dbebc134751, entries=150, sequenceid=134, filesize=11.9 K 2024-11-27T16:21:03,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/07526932f8c142dc83aa26e8ea7fe2a3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/07526932f8c142dc83aa26e8ea7fe2a3 2024-11-27T16:21:03,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/07526932f8c142dc83aa26e8ea7fe2a3, entries=150, sequenceid=134, filesize=11.9 K 2024-11-27T16:21:03,840 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for d498a187112eb3635082ffac2dfb4cf9 in 1451ms, sequenceid=134, compaction requested=true 2024-11-27T16:21:03,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:03,841 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction 
from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:03,843 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:03,843 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/A is initiating minor compaction (all files) 2024-11-27T16:21:03,843 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/A in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:03,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:03,843 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b8efb0547edc40d09f082dfae177e954, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/1108887f283a4f4d91a7c954ed7db869, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/32069e77767e417489e0b765c7359f12] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=35.5 K 2024-11-27T16:21:03,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:03,844 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:03,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:03,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:03,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:03,844 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8efb0547edc40d09f082dfae177e954, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732724461023 2024-11-27T16:21:03,845 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:03,845 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1108887f283a4f4d91a7c954ed7db869, 
keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732724461397 2024-11-27T16:21:03,845 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/B is initiating minor compaction (all files) 2024-11-27T16:21:03,846 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/B in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:03,846 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/3154ac50155b493c9e1a293bf3943ec3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/8f657cdacc484061880f654ef4b0754c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/9bb481d48ecd4c39b4757dbebc134751] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=35.5 K 2024-11-27T16:21:03,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:03,846 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32069e77767e417489e0b765c7359f12, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732724461755 2024-11-27T16:21:03,848 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3154ac50155b493c9e1a293bf3943ec3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732724461023 2024-11-27T16:21:03,849 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f657cdacc484061880f654ef4b0754c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732724461397 2024-11-27T16:21:03,849 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bb481d48ecd4c39b4757dbebc134751, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732724461755 2024-11-27T16:21:03,862 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:03,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T16:21:03,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:03,864 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-27T16:21:03,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:03,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:03,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:03,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:03,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:03,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:03,875 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#A#compaction#27 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:03,876 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/8d2ed70f414a4d9a9b7b99cff3ed306e is 50, key is test_row_0/A:col10/1732724461755/Put/seqid=0 2024-11-27T16:21:03,885 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#B#compaction#28 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:03,886 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/90c57b4d9df6410c9122c0fd597bb69c is 50, key is test_row_0/B:col10/1732724461755/Put/seqid=0 2024-11-27T16:21:03,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/e14183462e9948eaa9b02f92420e4273 is 50, key is test_row_0/A:col10/1732724462428/Put/seqid=0 2024-11-27T16:21:03,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741866_1042 (size=12459) 2024-11-27T16:21:03,921 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/8d2ed70f414a4d9a9b7b99cff3ed306e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8d2ed70f414a4d9a9b7b99cff3ed306e 2024-11-27T16:21:03,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741867_1043 (size=12459) 2024-11-27T16:21:03,937 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/A of d498a187112eb3635082ffac2dfb4cf9 into 8d2ed70f414a4d9a9b7b99cff3ed306e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:03,939 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:03,939 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/A, priority=13, startTime=1732724463841; duration=0sec 2024-11-27T16:21:03,939 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:03,939 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:A 2024-11-27T16:21:03,939 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:03,946 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:03,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741868_1044 (size=12151) 2024-11-27T16:21:03,946 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/C is initiating minor compaction (all files) 2024-11-27T16:21:03,946 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/C in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
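[Editor's note] The SortedCompactionPolicy/ExploringCompactionPolicy messages just above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking"; "selected 3 files of size 36359") reflect the per-store file-count thresholds that drive these minor compactions. Purely as an illustrative sketch, the commonly documented knobs can be set on a Configuration as below; the property values shown are inferred from this log, not read from the test's actual configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of store files before a minor compaction is considered;
        // the selections in this log each pick 3 files per store.
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Store-file count at which writes to the region are blocked until
        // compaction catches up; matches the "16 blocking" figure reported above.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println(conf.get("hbase.hstore.compactionThreshold"));
      }
    }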
2024-11-27T16:21:03,947 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/172fbd44c9d94147b760377a210720a4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f7c8558f74bf43f1bf704ffbebb7bfa2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/07526932f8c142dc83aa26e8ea7fe2a3] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=35.5 K 2024-11-27T16:21:03,947 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/90c57b4d9df6410c9122c0fd597bb69c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/90c57b4d9df6410c9122c0fd597bb69c 2024-11-27T16:21:03,948 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 172fbd44c9d94147b760377a210720a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732724461023 2024-11-27T16:21:03,949 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7c8558f74bf43f1bf704ffbebb7bfa2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732724461397 2024-11-27T16:21:03,949 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/e14183462e9948eaa9b02f92420e4273 2024-11-27T16:21:03,950 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07526932f8c142dc83aa26e8ea7fe2a3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732724461755 2024-11-27T16:21:03,969 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/B of d498a187112eb3635082ffac2dfb4cf9 into 90c57b4d9df6410c9122c0fd597bb69c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:03,969 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:03,969 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/B, priority=13, startTime=1732724463843; duration=0sec 2024-11-27T16:21:03,969 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:03,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/20994f7d06ed40959f61194e939215cd is 50, key is test_row_0/B:col10/1732724462428/Put/seqid=0 2024-11-27T16:21:03,971 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:B 2024-11-27T16:21:03,978 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#C#compaction#31 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:03,979 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/73a00bdf6ec0434d99b7ba354bbd58d8 is 50, key is test_row_0/C:col10/1732724461755/Put/seqid=0 2024-11-27T16:21:03,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741869_1045 (size=12151) 2024-11-27T16:21:04,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741870_1046 (size=12459) 2024-11-27T16:21:04,002 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/20994f7d06ed40959f61194e939215cd 2024-11-27T16:21:04,016 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/73a00bdf6ec0434d99b7ba354bbd58d8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/73a00bdf6ec0434d99b7ba354bbd58d8 2024-11-27T16:21:04,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/99a722ab62554990bbde265a8eb5d0ce is 50, key is test_row_0/C:col10/1732724462428/Put/seqid=0 2024-11-27T16:21:04,031 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/C of d498a187112eb3635082ffac2dfb4cf9 into 73a00bdf6ec0434d99b7ba354bbd58d8(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:04,031 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:04,031 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/C, priority=13, startTime=1732724463844; duration=0sec 2024-11-27T16:21:04,032 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:04,032 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:C 2024-11-27T16:21:04,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741871_1047 (size=12151) 2024-11-27T16:21:04,086 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/99a722ab62554990bbde265a8eb5d0ce 2024-11-27T16:21:04,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/e14183462e9948eaa9b02f92420e4273 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e14183462e9948eaa9b02f92420e4273 2024-11-27T16:21:04,113 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e14183462e9948eaa9b02f92420e4273, entries=150, sequenceid=156, filesize=11.9 K 2024-11-27T16:21:04,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/20994f7d06ed40959f61194e939215cd as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/20994f7d06ed40959f61194e939215cd 2024-11-27T16:21:04,127 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/20994f7d06ed40959f61194e939215cd, entries=150, sequenceid=156, filesize=11.9 K 2024-11-27T16:21:04,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/99a722ab62554990bbde265a8eb5d0ce as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/99a722ab62554990bbde265a8eb5d0ce 2024-11-27T16:21:04,140 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/99a722ab62554990bbde265a8eb5d0ce, entries=150, sequenceid=156, filesize=11.9 K 2024-11-27T16:21:04,142 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for d498a187112eb3635082ffac2dfb4cf9 in 279ms, sequenceid=156, compaction requested=false 2024-11-27T16:21:04,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:04,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:04,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-27T16:21:04,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-27T16:21:04,149 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-27T16:21:04,149 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0580 sec 2024-11-27T16:21:04,152 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.0710 sec 2024-11-27T16:21:04,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-27T16:21:04,195 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-27T16:21:04,197 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:04,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-27T16:21:04,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-27T16:21:04,200 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:04,201 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:04,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:04,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-27T16:21:04,355 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-27T16:21:04,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
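[Editor's note] The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" requests and the HBaseAdmin$TableFuture completions above are the client side of the FlushTableProcedure/FlushRegionProcedure pairs the master runs (e.g. pid=14/15 and 16/17 here). A minimal client-side sketch of such a flush request; the connection setup is an assumption, not taken from the test harness:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws IOException {
        // Assumes a reachable cluster configured via hbase-site.xml on the classpath.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.flush(table);      // the master turns this into a FlushTableProcedure, as logged above
          // admin.compact(table); // a compaction request would queue the kind of minor
          //                       // compactions completed earlier in this log
        }
      }
    }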
2024-11-27T16:21:04,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:04,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:04,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-27T16:21:04,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-27T16:21:04,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-27T16:21:04,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 157 msec 2024-11-27T16:21:04,366 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 167 msec 2024-11-27T16:21:04,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-27T16:21:04,503 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-27T16:21:04,506 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:04,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-27T16:21:04,511 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:04,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-27T16:21:04,513 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:04,513 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:04,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-27T16:21:04,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:04,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=53.67 KB 
heapSize=141.38 KB 2024-11-27T16:21:04,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:04,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:04,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:04,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:04,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:04,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:04,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/111a3867c2e04e438968114192cca81f is 50, key is test_row_0/A:col10/1732724464586/Put/seqid=0 2024-11-27T16:21:04,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724524652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724524652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724524656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724524654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724524659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,666 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-27T16:21:04,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:04,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:04,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:04,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
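[Editor's note] From 16:21:04,659 onward the region is over its 512.0 K memstore limit, so incoming Mutate RPCs are rejected with RegionTooBusyException until the in-flight flush drains the memstore, and the FlushRegionCallable for pid=19 fails with "NOT flushing ... as already flushing". The stock HBase client retries RegionTooBusyException internally; the sketch below only illustrates the shape of an explicit retry loop, assuming the exception surfaces to the caller (depending on client retry settings it may instead arrive wrapped in a RetriesExhaustedException). Row and value contents are invented; only the table name, families A/B/C and qualifier col10 appear in this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoffSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          for (String family : new String[] {"A", "B", "C"}) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          }
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;                              // write accepted
            } catch (RegionTooBusyException e) {  // region over its memstore limit (assumes the
                                                  // exception reaches the caller unwrapped)
              if (attempt >= 5) {
                throw e;                          // give up after a few tries
              }
              Thread.sleep(100L * attempt);       // simple linear backoff before retrying
            }
          }
        }
      }
    }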
2024-11-27T16:21:04,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:04,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:04,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741872_1048 (size=14541) 2024-11-27T16:21:04,672 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/111a3867c2e04e438968114192cca81f 2024-11-27T16:21:04,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/1b106ae95b084f5a9b0a8a97a5fdaacb is 50, key is test_row_0/B:col10/1732724464586/Put/seqid=0 2024-11-27T16:21:04,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741873_1049 (size=12151) 2024-11-27T16:21:04,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724524762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724524762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724524763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724524769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724524770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-27T16:21:04,821 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-27T16:21:04,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:04,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:04,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:04,822 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:04,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:04,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:04,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724524966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724524966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724524970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724524971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:04,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724524974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,976 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:04,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-27T16:21:04,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:04,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:04,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:04,977 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
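The repeated RegionTooBusyException entries above show Mutate RPCs being rejected once the memstore of region d498a187112eb3635082ffac2dfb4cf9 crosses its blocking limit ("Over memstore limit=512.0 K"). A minimal sketch of where that number comes from, assuming the test lowers hbase.hregion.memstore.flush.size to 128 KB and leaves hbase.hregion.memstore.block.multiplier at its default of 4; only the 512 K product is actually visible in the log, so the individual values are assumptions:

```java
// Sketch: reproduce the 512.0 K blocking limit reported in the log.
// Assumption: the test sets a small flush size; defaults are 128 MB and 4.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // assumed test value
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 K * 4 = 524288 bytes = 512 K, matching "Over memstore limit=512.0 K".
    System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes");
  }
}
```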
2024-11-27T16:21:04,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:04,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:05,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-27T16:21:05,123 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/1b106ae95b084f5a9b0a8a97a5fdaacb 2024-11-27T16:21:05,130 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-27T16:21:05,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:05,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:05,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:05,132 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
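Interleaved with the rejected writes, the master keeps re-dispatching the flush procedure (pid=19); each attempt logs "NOT flushing ... as already flushing" and the IOException is reported back through RemoteProcedureResultReporter until the in-progress flush finishes. A minimal sketch of the public client-side counterpart of that RS_FLUSH_REGIONS work, assuming a reachable cluster; Admin.flush is the documented entry point, while the retry loop seen in the log is driven internally by the master's procedure framework:

```java
// Sketch: requesting a table flush through the public client API.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequest {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush the table's regions, the same work the
      // FlushRegionCallable in the log performs on the region server.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```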
2024-11-27T16:21:05,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:05,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:05,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/156e1da83f914a1e9e418394eab431f0 is 50, key is test_row_0/C:col10/1732724464586/Put/seqid=0 2024-11-27T16:21:05,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741874_1050 (size=12151) 2024-11-27T16:21:05,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/156e1da83f914a1e9e418394eab431f0 2024-11-27T16:21:05,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/111a3867c2e04e438968114192cca81f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/111a3867c2e04e438968114192cca81f 2024-11-27T16:21:05,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/111a3867c2e04e438968114192cca81f, entries=200, sequenceid=171, filesize=14.2 K 2024-11-27T16:21:05,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/1b106ae95b084f5a9b0a8a97a5fdaacb as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/1b106ae95b084f5a9b0a8a97a5fdaacb 2024-11-27T16:21:05,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/1b106ae95b084f5a9b0a8a97a5fdaacb, entries=150, sequenceid=171, filesize=11.9 K 2024-11-27T16:21:05,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/156e1da83f914a1e9e418394eab431f0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/156e1da83f914a1e9e418394eab431f0 2024-11-27T16:21:05,238 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/156e1da83f914a1e9e418394eab431f0, entries=150, sequenceid=171, filesize=11.9 K 
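The flush itself completes in the entries above: each column family's data is written under the region's .tmp directory and then committed into the family directory (A: 14.2 K, B: 11.9 K, C: 11.9 K, all at sequenceid=171). A small sketch that lists the committed HFiles with the Hadoop FileSystem API; the namenode port and test-data path are copied from this particular run and would differ anywhere else:

```java
// Sketch: list the HFiles currently in each family directory of the region
// from the log. Names and sizes change as further flushes and compactions run.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStoreFiles {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34065"), new Configuration());
    String region = "/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59"
        + "/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9";
    for (String family : new String[] {"A", "B", "C"}) {
      for (FileStatus f : fs.listStatus(new Path(region, family))) {
        System.out.println(family + "/" + f.getPath().getName() + "\t" + f.getLen() + " bytes");
      }
    }
  }
}
```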
2024-11-27T16:21:05,240 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d498a187112eb3635082ffac2dfb4cf9 in 625ms, sequenceid=171, compaction requested=true 2024-11-27T16:21:05,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:05,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:05,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:05,240 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:05,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:05,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:05,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:05,240 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:05,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:05,242 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39151 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:05,242 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/A is initiating minor compaction (all files) 2024-11-27T16:21:05,242 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/A in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
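With three store files now present per family, the flusher marks each store for compaction and the selection policy reports "3 store files, 0 compacting, 3 eligible", consistent with the default minimum of three files for a minor compaction (hbase.hstore.compaction.min, assumed unchanged in this run). The same work can also be requested explicitly through the client API; a minimal sketch:

```java
// Sketch: explicitly queueing a compaction for the test table. The region
// server's CompactSplit pools (shortCompactions/longCompactions in the log)
// perform the actual rewrite.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactRequest {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.compact(TableName.valueOf("TestAcidGuarantees")); // minor compaction request
    }
  }
}
```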
2024-11-27T16:21:05,242 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8d2ed70f414a4d9a9b7b99cff3ed306e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e14183462e9948eaa9b02f92420e4273, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/111a3867c2e04e438968114192cca81f] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=38.2 K 2024-11-27T16:21:05,243 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:05,243 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/B is initiating minor compaction (all files) 2024-11-27T16:21:05,243 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/B in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:05,243 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/90c57b4d9df6410c9122c0fd597bb69c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/20994f7d06ed40959f61194e939215cd, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/1b106ae95b084f5a9b0a8a97a5fdaacb] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=35.9 K 2024-11-27T16:21:05,244 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d2ed70f414a4d9a9b7b99cff3ed306e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732724461755 2024-11-27T16:21:05,244 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 90c57b4d9df6410c9122c0fd597bb69c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732724461755 2024-11-27T16:21:05,245 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e14183462e9948eaa9b02f92420e4273, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732724462412 2024-11-27T16:21:05,245 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 20994f7d06ed40959f61194e939215cd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732724462412 2024-11-27T16:21:05,246 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 111a3867c2e04e438968114192cca81f, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732724464572 2024-11-27T16:21:05,246 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b106ae95b084f5a9b0a8a97a5fdaacb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732724464586 2024-11-27T16:21:05,259 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#B#compaction#36 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:05,260 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/d8c1cc8878ab4e40bdb2c42f38b96cb1 is 50, key is test_row_0/B:col10/1732724464586/Put/seqid=0 2024-11-27T16:21:05,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-27T16:21:05,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:05,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:05,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:05,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:05,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:05,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:05,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:05,278 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#A#compaction#37 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:05,279 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/3c7df496ef5b49d7a39271bee7e00519 is 50, key is test_row_0/A:col10/1732724464586/Put/seqid=0 2024-11-27T16:21:05,286 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-27T16:21:05,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:05,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:05,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:05,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:05,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:05,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:05,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724525285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724525285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724525290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724525290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724525291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/b7493f62cc1b4d1784057d9aadf636dd is 50, key is test_row_0/A:col10/1732724464657/Put/seqid=0 2024-11-27T16:21:05,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741875_1051 (size=12561) 2024-11-27T16:21:05,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741876_1052 (size=12561) 2024-11-27T16:21:05,347 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/3c7df496ef5b49d7a39271bee7e00519 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/3c7df496ef5b49d7a39271bee7e00519 2024-11-27T16:21:05,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741877_1053 (size=12151) 2024-11-27T16:21:05,366 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/A of d498a187112eb3635082ffac2dfb4cf9 into 3c7df496ef5b49d7a39271bee7e00519(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
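The compaction of store A completes just above: three inputs totalling 38.2 K (12.2 K + 11.9 K + 14.2 K) are rewritten into a single 12.3 K file, presumably because the flushed files carry overlapping versions of the same test rows. A minimal read sketch for the row those versions belong to (test_row_0, families A/B/C, qualifier col10, taken from the keys logged above); the property this test exercises is that such a read returns a consistent row despite the concurrent flush and compaction activity:

```java
// Sketch: read the row the test mutates and print one qualifier per family.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadAfterCompaction {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Result r = table.get(new Get(Bytes.toBytes("test_row_0")));
      for (String family : new String[] {"A", "B", "C"}) {
        byte[] v = r.getValue(Bytes.toBytes(family), Bytes.toBytes("col10"));
        System.out.println(family + ":col10 = " + (v == null ? "<none>" : Bytes.toStringBinary(v)));
      }
    }
  }
}
```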
2024-11-27T16:21:05,366 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:05,366 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/A, priority=13, startTime=1732724465240; duration=0sec 2024-11-27T16:21:05,367 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:05,367 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:A 2024-11-27T16:21:05,367 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:05,369 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:05,369 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/C is initiating minor compaction (all files) 2024-11-27T16:21:05,369 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/C in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:05,369 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/73a00bdf6ec0434d99b7ba354bbd58d8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/99a722ab62554990bbde265a8eb5d0ce, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/156e1da83f914a1e9e418394eab431f0] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=35.9 K 2024-11-27T16:21:05,370 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73a00bdf6ec0434d99b7ba354bbd58d8, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732724461755 2024-11-27T16:21:05,371 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99a722ab62554990bbde265a8eb5d0ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732724462412 2024-11-27T16:21:05,372 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/b7493f62cc1b4d1784057d9aadf636dd 2024-11-27T16:21:05,372 DEBUG 
[RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 156e1da83f914a1e9e418394eab431f0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732724464586 2024-11-27T16:21:05,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/3a3e6740b0c44e4d88aa2671afc29f5b is 50, key is test_row_0/B:col10/1732724464657/Put/seqid=0 2024-11-27T16:21:05,390 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#C#compaction#40 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:05,391 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/052b42f85efd48bbb005a447999dabda is 50, key is test_row_0/C:col10/1732724464586/Put/seqid=0 2024-11-27T16:21:05,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724525394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724525398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724525399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724525399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724525399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741878_1054 (size=12561) 2024-11-27T16:21:05,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741879_1055 (size=12151) 2024-11-27T16:21:05,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/3a3e6740b0c44e4d88aa2671afc29f5b 2024-11-27T16:21:05,439 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/052b42f85efd48bbb005a447999dabda as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/052b42f85efd48bbb005a447999dabda 2024-11-27T16:21:05,440 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,441 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-27T16:21:05,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:05,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
as already flushing 2024-11-27T16:21:05,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:05,442 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:05,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:05,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:05,450 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/C of d498a187112eb3635082ffac2dfb4cf9 into 052b42f85efd48bbb005a447999dabda(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:05,450 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:05,450 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/C, priority=13, startTime=1732724465240; duration=0sec 2024-11-27T16:21:05,450 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:05,450 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:C 2024-11-27T16:21:05,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/f8140f3ba7ff429cb2a72fe10adf911f is 50, key is test_row_0/C:col10/1732724464657/Put/seqid=0 2024-11-27T16:21:05,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741880_1056 (size=12151) 2024-11-27T16:21:05,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/f8140f3ba7ff429cb2a72fe10adf911f 2024-11-27T16:21:05,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/b7493f62cc1b4d1784057d9aadf636dd as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b7493f62cc1b4d1784057d9aadf636dd 2024-11-27T16:21:05,500 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b7493f62cc1b4d1784057d9aadf636dd, entries=150, sequenceid=198, filesize=11.9 K 2024-11-27T16:21:05,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/3a3e6740b0c44e4d88aa2671afc29f5b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/3a3e6740b0c44e4d88aa2671afc29f5b 2024-11-27T16:21:05,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/3a3e6740b0c44e4d88aa2671afc29f5b, entries=150, sequenceid=198, filesize=11.9 K 2024-11-27T16:21:05,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/f8140f3ba7ff429cb2a72fe10adf911f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f8140f3ba7ff429cb2a72fe10adf911f 2024-11-27T16:21:05,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f8140f3ba7ff429cb2a72fe10adf911f, entries=150, sequenceid=198, filesize=11.9 K 2024-11-27T16:21:05,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for d498a187112eb3635082ffac2dfb4cf9 in 246ms, sequenceid=198, compaction requested=false 2024-11-27T16:21:05,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:05,594 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-27T16:21:05,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:05,595 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-27T16:21:05,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:05,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:05,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:05,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:05,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:05,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:05,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:05,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:05,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/fb96e196b2fb4b47947f090b4865de76 is 50, key is test_row_0/A:col10/1732724465286/Put/seqid=0 2024-11-27T16:21:05,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-27T16:21:05,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724525641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724525641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724525642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724525643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724525647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741881_1057 (size=12151) 2024-11-27T16:21:05,737 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/d8c1cc8878ab4e40bdb2c42f38b96cb1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d8c1cc8878ab4e40bdb2c42f38b96cb1 2024-11-27T16:21:05,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724525753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724525753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724525754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,758 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/B of d498a187112eb3635082ffac2dfb4cf9 into d8c1cc8878ab4e40bdb2c42f38b96cb1(size=12.3 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:05,758 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:05,758 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/B, priority=13, startTime=1732724465240; duration=0sec 2024-11-27T16:21:05,758 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:05,758 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:B 2024-11-27T16:21:05,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724525756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724525756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724525957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724525958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724525960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724525962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:05,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:05,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724525964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:06,057 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/fb96e196b2fb4b47947f090b4865de76 2024-11-27T16:21:06,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/93d8f66040564ff4a5a45b1242126ebb is 50, key is test_row_0/B:col10/1732724465286/Put/seqid=0 2024-11-27T16:21:06,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741882_1058 (size=12151) 2024-11-27T16:21:06,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724526264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:06,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:06,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724526264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724526265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:06,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:06,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724526269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:06,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724526270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:06,516 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/93d8f66040564ff4a5a45b1242126ebb 2024-11-27T16:21:06,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/6ef4bf9868224957829f335584a52c06 is 50, key is test_row_0/C:col10/1732724465286/Put/seqid=0 2024-11-27T16:21:06,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741883_1059 (size=12151) 2024-11-27T16:21:06,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-27T16:21:06,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:06,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724526770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:06,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:06,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724526773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:06,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:06,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:06,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724526773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:06,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724526774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:06,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:06,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724526779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:06,946 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/6ef4bf9868224957829f335584a52c06 2024-11-27T16:21:06,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/fb96e196b2fb4b47947f090b4865de76 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/fb96e196b2fb4b47947f090b4865de76 2024-11-27T16:21:06,964 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/fb96e196b2fb4b47947f090b4865de76, entries=150, sequenceid=210, filesize=11.9 K 2024-11-27T16:21:06,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/93d8f66040564ff4a5a45b1242126ebb as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/93d8f66040564ff4a5a45b1242126ebb 2024-11-27T16:21:06,975 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/93d8f66040564ff4a5a45b1242126ebb, entries=150, sequenceid=210, filesize=11.9 K 2024-11-27T16:21:06,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/6ef4bf9868224957829f335584a52c06 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/6ef4bf9868224957829f335584a52c06 2024-11-27T16:21:06,987 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/6ef4bf9868224957829f335584a52c06, entries=150, sequenceid=210, filesize=11.9 K 2024-11-27T16:21:06,988 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for d498a187112eb3635082ffac2dfb4cf9 in 1393ms, sequenceid=210, compaction requested=true 2024-11-27T16:21:06,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:06,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:06,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-27T16:21:06,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-27T16:21:06,994 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-27T16:21:06,994 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4790 sec 2024-11-27T16:21:06,997 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 2.4890 sec 2024-11-27T16:21:07,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:07,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-27T16:21:07,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:07,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:07,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:07,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:07,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:07,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:07,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/edf40a837cbb42b9896ad8ba256f8aad is 50, key is test_row_0/A:col10/1732724467783/Put/seqid=0 2024-11-27T16:21:07,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:07,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724527792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:07,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:07,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724527793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:07,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:07,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724527796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:07,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:07,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724527798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:07,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:07,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724527800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:07,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741884_1060 (size=14541) 2024-11-27T16:21:07,817 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/edf40a837cbb42b9896ad8ba256f8aad 2024-11-27T16:21:07,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/d9662dd7ae8c40758e7366e68e1a0757 is 50, key is test_row_0/B:col10/1732724467783/Put/seqid=0 2024-11-27T16:21:07,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741885_1061 (size=12151) 2024-11-27T16:21:07,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/d9662dd7ae8c40758e7366e68e1a0757 2024-11-27T16:21:07,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/8ac21d2deaf742cfb7bebcd503531d3d is 50, key is test_row_0/C:col10/1732724467783/Put/seqid=0 2024-11-27T16:21:07,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:07,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724527902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:07,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:07,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724527903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:07,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:07,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724527904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:07,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:07,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724527906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:07,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741886_1062 (size=12151) 2024-11-27T16:21:08,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724528106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724528109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724528110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724528119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,326 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/8ac21d2deaf742cfb7bebcd503531d3d 2024-11-27T16:21:08,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/edf40a837cbb42b9896ad8ba256f8aad as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/edf40a837cbb42b9896ad8ba256f8aad 2024-11-27T16:21:08,342 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/edf40a837cbb42b9896ad8ba256f8aad, entries=200, sequenceid=238, filesize=14.2 K 2024-11-27T16:21:08,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/d9662dd7ae8c40758e7366e68e1a0757 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d9662dd7ae8c40758e7366e68e1a0757 2024-11-27T16:21:08,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d9662dd7ae8c40758e7366e68e1a0757, entries=150, sequenceid=238, filesize=11.9 K 2024-11-27T16:21:08,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/8ac21d2deaf742cfb7bebcd503531d3d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/8ac21d2deaf742cfb7bebcd503531d3d 2024-11-27T16:21:08,381 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/8ac21d2deaf742cfb7bebcd503531d3d, entries=150, sequenceid=238, filesize=11.9 K 2024-11-27T16:21:08,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for d498a187112eb3635082ffac2dfb4cf9 in 598ms, sequenceid=238, compaction requested=true 2024-11-27T16:21:08,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:08,386 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:08,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:08,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:08,387 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:08,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:08,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:08,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:08,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:08,390 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51404 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:08,390 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 
files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:08,390 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/A is initiating minor compaction (all files) 2024-11-27T16:21:08,390 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/B is initiating minor compaction (all files) 2024-11-27T16:21:08,390 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/A in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:08,390 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/B in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:08,390 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/3c7df496ef5b49d7a39271bee7e00519, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b7493f62cc1b4d1784057d9aadf636dd, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/fb96e196b2fb4b47947f090b4865de76, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/edf40a837cbb42b9896ad8ba256f8aad] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=50.2 K 2024-11-27T16:21:08,390 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d8c1cc8878ab4e40bdb2c42f38b96cb1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/3a3e6740b0c44e4d88aa2671afc29f5b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/93d8f66040564ff4a5a45b1242126ebb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d9662dd7ae8c40758e7366e68e1a0757] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=47.9 K 2024-11-27T16:21:08,391 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d8c1cc8878ab4e40bdb2c42f38b96cb1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732724464586 2024-11-27T16:21:08,391 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c7df496ef5b49d7a39271bee7e00519, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732724464586 
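The two "Exploring compaction algorithm has selected 4 files ... in ratio" entries above record the exploring compaction policy accepting the four flushed HFiles of each store as a minor-compaction candidate. As a rough illustration of what "in ratio" means, the sketch below applies the usual check that no file in the selection is larger than the configured ratio times the combined size of the other files; the 1.2 ratio matches the default hbase.hstore.compaction.ratio, and the 12,561-byte figure is back-calculated from the reported 51,404-byte total rather than read directly from the log, so treat the block as an assumption-laden sketch and not the actual ExploringCompactionPolicy source.

```java
// Simplified "in ratio" check, modelled on ratio-based/exploring compaction selection;
// NOT the real HBase ExploringCompactionPolicy code.
import java.util.List;

public class RatioCheckSketch {

    // A selection is "in ratio" when every file is no larger than ratio * (sum of the others).
    static boolean inRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Store-file sizes for store A as reported in the excerpt (12.3 K, 11.9 K, 11.9 K, 14.2 K);
        // 12561 is inferred from the logged 51404-byte total.
        List<Long> storeA = List.of(12_561L, 12_151L, 12_151L, 14_541L);
        System.out.println("in ratio: " + inRatio(storeA, 1.2)); // prints: in ratio: true
    }
}
```

With all four files close in size, the check passes and the policy starts the minor compactions that the following entries describe.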
2024-11-27T16:21:08,392 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a3e6740b0c44e4d88aa2671afc29f5b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732724464657 2024-11-27T16:21:08,395 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7493f62cc1b4d1784057d9aadf636dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732724464657 2024-11-27T16:21:08,396 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 93d8f66040564ff4a5a45b1242126ebb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732724465286 2024-11-27T16:21:08,396 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d9662dd7ae8c40758e7366e68e1a0757, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732724465639 2024-11-27T16:21:08,396 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb96e196b2fb4b47947f090b4865de76, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732724465286 2024-11-27T16:21:08,397 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting edf40a837cbb42b9896ad8ba256f8aad, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732724465639 2024-11-27T16:21:08,413 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#B#compaction#48 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:08,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:21:08,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:08,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:08,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:08,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:08,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:08,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:08,416 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/f694ba9928b241008f5fffd207210b16 is 50, key is test_row_0/B:col10/1732724467783/Put/seqid=0 2024-11-27T16:21:08,427 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#A#compaction#49 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:08,427 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/60dff7215c8f4493b288d92dacaa258e is 50, key is test_row_0/A:col10/1732724468412/Put/seqid=0 2024-11-27T16:21:08,428 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/856076fddf7e400ea994f60dda84a73c is 50, key is test_row_0/A:col10/1732724467783/Put/seqid=0 2024-11-27T16:21:08,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741888_1064 (size=12147) 2024-11-27T16:21:08,458 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/60dff7215c8f4493b288d92dacaa258e 2024-11-27T16:21:08,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741887_1063 (size=12697) 2024-11-27T16:21:08,472 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/f694ba9928b241008f5fffd207210b16 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f694ba9928b241008f5fffd207210b16 2024-11-27T16:21:08,481 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/B of d498a187112eb3635082ffac2dfb4cf9 into f694ba9928b241008f5fffd207210b16(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:08,481 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:08,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,481 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/B, priority=12, startTime=1732724468387; duration=0sec 2024-11-27T16:21:08,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724528475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,481 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:08,482 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:B 2024-11-27T16:21:08,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724528473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,482 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:08,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724528477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724528480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741889_1065 (size=12697) 2024-11-27T16:21:08,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/86fd7b60133a44868b62e9b63fc138c5 is 50, key is test_row_0/B:col10/1732724468412/Put/seqid=0 2024-11-27T16:21:08,487 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:08,487 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/C is initiating minor compaction (all files) 2024-11-27T16:21:08,487 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/C in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:08,487 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/052b42f85efd48bbb005a447999dabda, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f8140f3ba7ff429cb2a72fe10adf911f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/6ef4bf9868224957829f335584a52c06, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/8ac21d2deaf742cfb7bebcd503531d3d] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=47.9 K 2024-11-27T16:21:08,487 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 052b42f85efd48bbb005a447999dabda, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732724464586 2024-11-27T16:21:08,488 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f8140f3ba7ff429cb2a72fe10adf911f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732724464657 2024-11-27T16:21:08,489 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ef4bf9868224957829f335584a52c06, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732724465286 2024-11-27T16:21:08,489 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ac21d2deaf742cfb7bebcd503531d3d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732724465639 2024-11-27T16:21:08,508 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#C#compaction#52 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:08,508 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/f4fa77d91b8f4671b3164497de3c4ab9 is 50, key is test_row_0/C:col10/1732724467783/Put/seqid=0 2024-11-27T16:21:08,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741890_1066 (size=9757) 2024-11-27T16:21:08,529 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/86fd7b60133a44868b62e9b63fc138c5 2024-11-27T16:21:08,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741891_1067 (size=12697) 2024-11-27T16:21:08,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/882f97eb7d134e268b01cd978c05886b is 50, key is test_row_0/C:col10/1732724468412/Put/seqid=0 2024-11-27T16:21:08,557 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/f4fa77d91b8f4671b3164497de3c4ab9 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f4fa77d91b8f4671b3164497de3c4ab9 2024-11-27T16:21:08,567 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/C of d498a187112eb3635082ffac2dfb4cf9 into f4fa77d91b8f4671b3164497de3c4ab9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:08,567 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:08,567 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/C, priority=12, startTime=1732724468389; duration=0sec 2024-11-27T16:21:08,567 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:08,567 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:C 2024-11-27T16:21:08,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741892_1068 (size=9757) 2024-11-27T16:21:08,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/882f97eb7d134e268b01cd978c05886b 2024-11-27T16:21:08,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724528583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724528585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724528587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724528587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/60dff7215c8f4493b288d92dacaa258e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/60dff7215c8f4493b288d92dacaa258e 2024-11-27T16:21:08,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/60dff7215c8f4493b288d92dacaa258e, entries=150, sequenceid=249, filesize=11.9 K 2024-11-27T16:21:08,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/86fd7b60133a44868b62e9b63fc138c5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/86fd7b60133a44868b62e9b63fc138c5 2024-11-27T16:21:08,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/86fd7b60133a44868b62e9b63fc138c5, entries=100, sequenceid=249, filesize=9.5 K 2024-11-27T16:21:08,612 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/882f97eb7d134e268b01cd978c05886b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/882f97eb7d134e268b01cd978c05886b 2024-11-27T16:21:08,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-27T16:21:08,619 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-27T16:21:08,620 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/882f97eb7d134e268b01cd978c05886b, entries=100, sequenceid=249, filesize=9.5 K 2024-11-27T16:21:08,622 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:08,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-27T16:21:08,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d498a187112eb3635082ffac2dfb4cf9 in 208ms, sequenceid=249, compaction requested=false 2024-11-27T16:21:08,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:08,624 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:08,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-27T16:21:08,625 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:08,625 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:08,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-27T16:21:08,777 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-27T16:21:08,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:08,779 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:21:08,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:08,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:08,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:08,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:08,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:08,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:08,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/df5f4366b63d48f28cc29b867927b3c0 is 50, key is test_row_0/A:col10/1732724468478/Put/seqid=0 2024-11-27T16:21:08,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:08,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:08,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724528803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724528806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724528806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724528806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741893_1069 (size=12301) 2024-11-27T16:21:08,819 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/df5f4366b63d48f28cc29b867927b3c0 2024-11-27T16:21:08,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/f104be8048e743268ca3394568acf0fe is 50, key is test_row_0/B:col10/1732724468478/Put/seqid=0 2024-11-27T16:21:08,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741894_1070 (size=12301) 2024-11-27T16:21:08,895 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/856076fddf7e400ea994f60dda84a73c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/856076fddf7e400ea994f60dda84a73c 2024-11-27T16:21:08,905 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/A of d498a187112eb3635082ffac2dfb4cf9 into 856076fddf7e400ea994f60dda84a73c(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:08,905 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:08,905 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/A, priority=12, startTime=1732724468386; duration=0sec 2024-11-27T16:21:08,905 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:08,905 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:A 2024-11-27T16:21:08,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724528908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724528912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724528912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:08,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724528913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:08,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-27T16:21:09,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:09,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724529113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:09,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:09,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724529115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:09,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:09,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724529118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:09,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:09,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724529117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:09,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-27T16:21:09,276 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/f104be8048e743268ca3394568acf0fe 2024-11-27T16:21:09,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/6cf8ac1cc5f440ee8c5c91b5372bef85 is 50, key is test_row_0/C:col10/1732724468478/Put/seqid=0 2024-11-27T16:21:09,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741895_1071 (size=12301) 2024-11-27T16:21:09,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:09,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724529419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:09,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:09,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724529423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:09,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:09,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724529424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:09,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:09,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724529424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:09,711 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/6cf8ac1cc5f440ee8c5c91b5372bef85 2024-11-27T16:21:09,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/df5f4366b63d48f28cc29b867927b3c0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/df5f4366b63d48f28cc29b867927b3c0 2024-11-27T16:21:09,725 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/df5f4366b63d48f28cc29b867927b3c0, entries=150, sequenceid=276, filesize=12.0 K 2024-11-27T16:21:09,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-27T16:21:09,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/f104be8048e743268ca3394568acf0fe as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f104be8048e743268ca3394568acf0fe 2024-11-27T16:21:09,743 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f104be8048e743268ca3394568acf0fe, entries=150, sequenceid=276, filesize=12.0 K 2024-11-27T16:21:09,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/6cf8ac1cc5f440ee8c5c91b5372bef85 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/6cf8ac1cc5f440ee8c5c91b5372bef85 2024-11-27T16:21:09,752 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/6cf8ac1cc5f440ee8c5c91b5372bef85, entries=150, sequenceid=276, filesize=12.0 K 2024-11-27T16:21:09,754 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for d498a187112eb3635082ffac2dfb4cf9 in 975ms, sequenceid=276, compaction requested=true 2024-11-27T16:21:09,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:09,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:09,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-27T16:21:09,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-27T16:21:09,758 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-27T16:21:09,758 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1310 sec 2024-11-27T16:21:09,761 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.1370 sec 2024-11-27T16:21:09,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:09,813 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T16:21:09,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:09,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:09,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:09,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:09,813 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:09,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:09,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/15a7ef453bec4bb5a201c81ff09c8a17 is 50, key is test_row_0/A:col10/1732724469810/Put/seqid=0 2024-11-27T16:21:09,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741896_1072 (size=12301) 2024-11-27T16:21:09,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/15a7ef453bec4bb5a201c81ff09c8a17 2024-11-27T16:21:09,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/898aa33bd81a4d1eb4018f2bfe4f3edb is 50, key is test_row_0/B:col10/1732724469810/Put/seqid=0 2024-11-27T16:21:09,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741897_1073 (size=12301) 2024-11-27T16:21:09,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:09,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724529913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:09,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/898aa33bd81a4d1eb4018f2bfe4f3edb 2024-11-27T16:21:09,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:09,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724529926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:09,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:09,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724529927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:09,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:09,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:09,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724529927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:09,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724529930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:09,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/652298c25aec4a0b9f63c2f9c0c99c6a is 50, key is test_row_0/C:col10/1732724469810/Put/seqid=0 2024-11-27T16:21:09,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741898_1074 (size=12301) 2024-11-27T16:21:09,975 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/652298c25aec4a0b9f63c2f9c0c99c6a 2024-11-27T16:21:09,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/15a7ef453bec4bb5a201c81ff09c8a17 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/15a7ef453bec4bb5a201c81ff09c8a17 2024-11-27T16:21:09,995 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/15a7ef453bec4bb5a201c81ff09c8a17, entries=150, sequenceid=291, filesize=12.0 K 2024-11-27T16:21:09,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/898aa33bd81a4d1eb4018f2bfe4f3edb as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/898aa33bd81a4d1eb4018f2bfe4f3edb 2024-11-27T16:21:10,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/898aa33bd81a4d1eb4018f2bfe4f3edb, entries=150, sequenceid=291, filesize=12.0 K 2024-11-27T16:21:10,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/652298c25aec4a0b9f63c2f9c0c99c6a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/652298c25aec4a0b9f63c2f9c0c99c6a 2024-11-27T16:21:10,014 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/652298c25aec4a0b9f63c2f9c0c99c6a, entries=150, sequenceid=291, filesize=12.0 K 2024-11-27T16:21:10,016 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d498a187112eb3635082ffac2dfb4cf9 in 203ms, sequenceid=291, compaction requested=true 2024-11-27T16:21:10,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:10,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:10,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:10,016 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:10,016 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:10,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:10,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-11-27T16:21:10,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:10,019 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:10,019 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49446 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:10,019 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/A is initiating minor compaction (all files) 2024-11-27T16:21:10,019 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/B is initiating minor compaction (all files) 2024-11-27T16:21:10,019 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/A in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:10,019 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/B in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:10,019 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f694ba9928b241008f5fffd207210b16, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/86fd7b60133a44868b62e9b63fc138c5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f104be8048e743268ca3394568acf0fe, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/898aa33bd81a4d1eb4018f2bfe4f3edb] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=46.0 K 2024-11-27T16:21:10,019 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/856076fddf7e400ea994f60dda84a73c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/60dff7215c8f4493b288d92dacaa258e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/df5f4366b63d48f28cc29b867927b3c0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/15a7ef453bec4bb5a201c81ff09c8a17] into 
tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=48.3 K 2024-11-27T16:21:10,020 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f694ba9928b241008f5fffd207210b16, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732724465639 2024-11-27T16:21:10,020 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 856076fddf7e400ea994f60dda84a73c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732724465639 2024-11-27T16:21:10,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:10,021 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 86fd7b60133a44868b62e9b63fc138c5, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724467796 2024-11-27T16:21:10,021 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60dff7215c8f4493b288d92dacaa258e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724467796 2024-11-27T16:21:10,021 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f104be8048e743268ca3394568acf0fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732724468471 2024-11-27T16:21:10,021 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting df5f4366b63d48f28cc29b867927b3c0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732724468471 2024-11-27T16:21:10,022 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 898aa33bd81a4d1eb4018f2bfe4f3edb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732724468789 2024-11-27T16:21:10,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:10,023 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15a7ef453bec4bb5a201c81ff09c8a17, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732724468789 2024-11-27T16:21:10,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T16:21:10,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:10,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:10,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:10,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:10,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): 
FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:10,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:10,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/20033b0a91f1489fb5842d5f0158c835 is 50, key is test_row_0/A:col10/1732724470021/Put/seqid=0 2024-11-27T16:21:10,036 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#B#compaction#61 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:10,037 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/1e47e4f1d38f48bfa265f8b0438e35da is 50, key is test_row_0/B:col10/1732724469810/Put/seqid=0 2024-11-27T16:21:10,042 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#A#compaction#62 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:10,043 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/8c5c05810c324c34a589cfc0e3d2716a is 50, key is test_row_0/A:col10/1732724469810/Put/seqid=0 2024-11-27T16:21:10,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741900_1076 (size=12983) 2024-11-27T16:21:10,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741899_1075 (size=14741) 2024-11-27T16:21:10,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741901_1077 (size=12983) 2024-11-27T16:21:10,056 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/1e47e4f1d38f48bfa265f8b0438e35da as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/1e47e4f1d38f48bfa265f8b0438e35da 2024-11-27T16:21:10,065 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/8c5c05810c324c34a589cfc0e3d2716a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8c5c05810c324c34a589cfc0e3d2716a 2024-11-27T16:21:10,072 INFO 
[RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/B of d498a187112eb3635082ffac2dfb4cf9 into 1e47e4f1d38f48bfa265f8b0438e35da(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:10,072 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:10,072 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/B, priority=12, startTime=1732724470016; duration=0sec 2024-11-27T16:21:10,072 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:10,073 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:B 2024-11-27T16:21:10,073 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:10,073 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/A of d498a187112eb3635082ffac2dfb4cf9 into 8c5c05810c324c34a589cfc0e3d2716a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:10,073 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:10,073 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/A, priority=12, startTime=1732724470016; duration=0sec 2024-11-27T16:21:10,073 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:10,073 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:A 2024-11-27T16:21:10,076 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:10,077 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/C is initiating minor compaction (all files) 2024-11-27T16:21:10,077 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/C in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:10,077 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f4fa77d91b8f4671b3164497de3c4ab9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/882f97eb7d134e268b01cd978c05886b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/6cf8ac1cc5f440ee8c5c91b5372bef85, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/652298c25aec4a0b9f63c2f9c0c99c6a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=46.0 K 2024-11-27T16:21:10,077 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f4fa77d91b8f4671b3164497de3c4ab9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732724465639 2024-11-27T16:21:10,080 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 882f97eb7d134e268b01cd978c05886b, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724467796 2024-11-27T16:21:10,081 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6cf8ac1cc5f440ee8c5c91b5372bef85, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732724468471 2024-11-27T16:21:10,082 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 652298c25aec4a0b9f63c2f9c0c99c6a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732724468789 2024-11-27T16:21:10,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:10,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724530088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:10,113 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#C#compaction#63 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:10,114 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/8ac00526098745c2aa2df339cc2835fc is 50, key is test_row_0/C:col10/1732724469810/Put/seqid=0 2024-11-27T16:21:10,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741902_1078 (size=12983) 2024-11-27T16:21:10,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:10,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724530191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:10,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:10,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724530395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:10,448 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/20033b0a91f1489fb5842d5f0158c835 2024-11-27T16:21:10,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/f7ad4c8fde584367b0918c9db0a2263d is 50, key is test_row_0/B:col10/1732724470021/Put/seqid=0 2024-11-27T16:21:10,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741903_1079 (size=12301) 2024-11-27T16:21:10,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/f7ad4c8fde584367b0918c9db0a2263d 2024-11-27T16:21:10,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/cd784bb0ba514a36b63c42ed745070c5 is 50, key is test_row_0/C:col10/1732724470021/Put/seqid=0 2024-11-27T16:21:10,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741904_1080 (size=12301) 2024-11-27T16:21:10,546 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/cd784bb0ba514a36b63c42ed745070c5 2024-11-27T16:21:10,546 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/8ac00526098745c2aa2df339cc2835fc as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/8ac00526098745c2aa2df339cc2835fc 2024-11-27T16:21:10,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/20033b0a91f1489fb5842d5f0158c835 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/20033b0a91f1489fb5842d5f0158c835 2024-11-27T16:21:10,561 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/C of d498a187112eb3635082ffac2dfb4cf9 into 8ac00526098745c2aa2df339cc2835fc(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:10,561 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:10,561 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/C, priority=12, startTime=1732724470018; duration=0sec 2024-11-27T16:21:10,561 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:10,561 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:C 2024-11-27T16:21:10,567 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/20033b0a91f1489fb5842d5f0158c835, entries=200, sequenceid=314, filesize=14.4 K 2024-11-27T16:21:10,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/f7ad4c8fde584367b0918c9db0a2263d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f7ad4c8fde584367b0918c9db0a2263d 2024-11-27T16:21:10,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f7ad4c8fde584367b0918c9db0a2263d, entries=150, sequenceid=314, filesize=12.0 K 2024-11-27T16:21:10,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/cd784bb0ba514a36b63c42ed745070c5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/cd784bb0ba514a36b63c42ed745070c5 2024-11-27T16:21:10,588 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/cd784bb0ba514a36b63c42ed745070c5, entries=150, sequenceid=314, filesize=12.0 K 2024-11-27T16:21:10,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d498a187112eb3635082ffac2dfb4cf9 in 567ms, sequenceid=314, compaction requested=false 2024-11-27T16:21:10,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:10,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:10,699 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T16:21:10,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:10,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:10,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:10,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:10,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:10,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:10,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/88a89e647af54b01bca51190f1d2b51d is 50, key is test_row_0/A:col10/1732724470080/Put/seqid=0 2024-11-27T16:21:10,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741905_1081 (size=12297) 2024-11-27T16:21:10,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-27T16:21:10,730 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-27T16:21:10,734 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:10,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-27T16:21:10,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-27T16:21:10,736 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:10,737 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:10,737 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:10,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:10,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724530761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:10,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-27T16:21:10,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:10,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724530864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:10,895 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:10,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-27T16:21:10,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:10,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:10,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:10,896 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:10,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:10,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:10,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:10,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724530932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:10,936 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:10,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724530935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:10,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:10,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724530938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:10,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:10,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724530943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:11,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-27T16:21:11,049 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:11,049 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-27T16:21:11,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:11,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:11,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:11,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:11,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:11,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:11,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:11,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724531068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:11,119 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/88a89e647af54b01bca51190f1d2b51d 2024-11-27T16:21:11,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/321a12869b5448f0a43b7a68929d5cf4 is 50, key is test_row_0/B:col10/1732724470080/Put/seqid=0 2024-11-27T16:21:11,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741906_1082 (size=9857) 2024-11-27T16:21:11,160 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/321a12869b5448f0a43b7a68929d5cf4 2024-11-27T16:21:11,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/3209dfa5b1e2422c8befc1517b5d7c2c is 50, key is test_row_0/C:col10/1732724470080/Put/seqid=0 2024-11-27T16:21:11,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741907_1083 (size=9857) 2024-11-27T16:21:11,202 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:11,203 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/3209dfa5b1e2422c8befc1517b5d7c2c 2024-11-27T16:21:11,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-27T16:21:11,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:11,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:11,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:11,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:11,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:11,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:11,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/88a89e647af54b01bca51190f1d2b51d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/88a89e647af54b01bca51190f1d2b51d 2024-11-27T16:21:11,219 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/88a89e647af54b01bca51190f1d2b51d, entries=150, sequenceid=331, filesize=12.0 K 2024-11-27T16:21:11,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/321a12869b5448f0a43b7a68929d5cf4 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/321a12869b5448f0a43b7a68929d5cf4 2024-11-27T16:21:11,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/321a12869b5448f0a43b7a68929d5cf4, entries=100, sequenceid=331, filesize=9.6 K 2024-11-27T16:21:11,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/3209dfa5b1e2422c8befc1517b5d7c2c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/3209dfa5b1e2422c8befc1517b5d7c2c 2024-11-27T16:21:11,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/3209dfa5b1e2422c8befc1517b5d7c2c, entries=100, sequenceid=331, filesize=9.6 K 2024-11-27T16:21:11,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d498a187112eb3635082ffac2dfb4cf9 in 538ms, sequenceid=331, compaction requested=true 2024-11-27T16:21:11,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:11,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:11,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:11,238 DEBUG 
[RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:11,238 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:11,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:11,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:11,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:11,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:11,241 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:11,241 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/B is initiating minor compaction (all files) 2024-11-27T16:21:11,241 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/B in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:11,241 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/1e47e4f1d38f48bfa265f8b0438e35da, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f7ad4c8fde584367b0918c9db0a2263d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/321a12869b5448f0a43b7a68929d5cf4] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=34.3 K 2024-11-27T16:21:11,240 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40021 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:11,242 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/A is initiating minor compaction (all files) 2024-11-27T16:21:11,242 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/A in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:11,242 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8c5c05810c324c34a589cfc0e3d2716a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/20033b0a91f1489fb5842d5f0158c835, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/88a89e647af54b01bca51190f1d2b51d] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=39.1 K 2024-11-27T16:21:11,242 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e47e4f1d38f48bfa265f8b0438e35da, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732724468789 2024-11-27T16:21:11,242 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c5c05810c324c34a589cfc0e3d2716a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732724468789 2024-11-27T16:21:11,243 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f7ad4c8fde584367b0918c9db0a2263d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732724469906 2024-11-27T16:21:11,243 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20033b0a91f1489fb5842d5f0158c835, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732724469850 2024-11-27T16:21:11,243 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 321a12869b5448f0a43b7a68929d5cf4, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732724470080 2024-11-27T16:21:11,244 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88a89e647af54b01bca51190f1d2b51d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732724470080 2024-11-27T16:21:11,264 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#B#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:11,265 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#A#compaction#70 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:11,265 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/2f125909aabe4f6587c2585c5fb9f28d is 50, key is test_row_0/B:col10/1732724470080/Put/seqid=0 2024-11-27T16:21:11,265 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/1ab6737aff264112993b90fe7db66374 is 50, key is test_row_0/A:col10/1732724470080/Put/seqid=0 2024-11-27T16:21:11,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741909_1085 (size=13085) 2024-11-27T16:21:11,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741908_1084 (size=13085) 2024-11-27T16:21:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-27T16:21:11,357 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:11,357 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-27T16:21:11,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:11,358 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-27T16:21:11,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:11,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:11,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:11,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:11,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:11,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:11,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/26ccd1f3460042edb0b90da810a44326 is 50, key is test_row_0/A:col10/1732724470751/Put/seqid=0 2024-11-27T16:21:11,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741910_1086 (size=12301) 2024-11-27T16:21:11,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:11,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724531407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:11,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:11,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724531512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:11,685 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/1ab6737aff264112993b90fe7db66374 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/1ab6737aff264112993b90fe7db66374 2024-11-27T16:21:11,686 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/2f125909aabe4f6587c2585c5fb9f28d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/2f125909aabe4f6587c2585c5fb9f28d 2024-11-27T16:21:11,695 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/B of d498a187112eb3635082ffac2dfb4cf9 into 2f125909aabe4f6587c2585c5fb9f28d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:11,695 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/A of d498a187112eb3635082ffac2dfb4cf9 into 1ab6737aff264112993b90fe7db66374(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
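The repeated RegionTooBusyException entries above are thrown from HRegion.checkResources once the region's memstore exceeds its blocking limit, reported here as 512.0 K because the test runs with deliberately tiny limits. A minimal sketch of how that blocking limit is derived from configuration follows; the property keys are standard HBase names, but the concrete flush size and multiplier values are illustrative assumptions chosen so their product matches the 512 K figure in the log.

    // Illustrative sketch (not part of the captured log). The keys are real HBase
    // configuration names; the specific values are assumptions for this test setup.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed 128 K for the test
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // default multiplier

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

        // Puts against a region are rejected with RegionTooBusyException while its
        // memstore stays above flushSize * multiplier and the flush has not caught up.
        System.out.println("Blocking limit = " + (flushSize * multiplier / 1024) + " K");
      }
    }

With the production defaults (128 MB flush size, multiplier 4) the same guard trips at 512 MB per region rather than 512 K, which is why this exception is common in small-memstore test runs but rare in normal operation.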
2024-11-27T16:21:11,695 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:11,695 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:11,695 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/B, priority=13, startTime=1732724471238; duration=0sec 2024-11-27T16:21:11,695 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/A, priority=13, startTime=1732724471238; duration=0sec 2024-11-27T16:21:11,695 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:11,695 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:B 2024-11-27T16:21:11,695 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:11,695 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:A 2024-11-27T16:21:11,695 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:11,697 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:11,697 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/C is initiating minor compaction (all files) 2024-11-27T16:21:11,697 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/C in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
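The SortedCompactionPolicy/ExploringCompactionPolicy lines above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", "selected 3 files ... after considering 1 permutations") are driven by a handful of store-file thresholds. A hedged sketch of reading those knobs, assuming the stock configuration keys; the defaults in the comments line up with the "16 blocking" figure in the log.

    // Illustrative sketch (not part of the captured log): the thresholds behind the
    // "Selecting compaction from N store files ... eligible ... blocking" messages.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionKnobs {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Minimum/maximum number of HFiles the exploring policy will combine in one
        // minor compaction (defaults 3 and 10); three flushed files per store suffice here.
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
        int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);

        // Once a store accumulates this many HFiles, further flushes are delayed until
        // compaction catches up -- the "16 blocking" figure reported in the log.
        int blocking = conf.getInt("hbase.hstore.blockingStoreFiles", 16);

        System.out.printf("min=%d max=%d blockingStoreFiles=%d%n", minFiles, maxFiles, blocking);
      }
    }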
2024-11-27T16:21:11,697 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/8ac00526098745c2aa2df339cc2835fc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/cd784bb0ba514a36b63c42ed745070c5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/3209dfa5b1e2422c8befc1517b5d7c2c] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=34.3 K 2024-11-27T16:21:11,698 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ac00526098745c2aa2df339cc2835fc, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732724468789 2024-11-27T16:21:11,698 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting cd784bb0ba514a36b63c42ed745070c5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732724469906 2024-11-27T16:21:11,698 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3209dfa5b1e2422c8befc1517b5d7c2c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732724470080 2024-11-27T16:21:11,709 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#C#compaction#72 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:11,710 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/b90fd464dcb7442f946599f1b97363fd is 50, key is test_row_0/C:col10/1732724470080/Put/seqid=0 2024-11-27T16:21:11,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741911_1087 (size=13085) 2024-11-27T16:21:11,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:11,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724531718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:11,769 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/26ccd1f3460042edb0b90da810a44326 2024-11-27T16:21:11,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/db07053f421340aab8f79a7249661c36 is 50, key is test_row_0/B:col10/1732724470751/Put/seqid=0 2024-11-27T16:21:11,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741912_1088 (size=12301) 2024-11-27T16:21:11,794 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/db07053f421340aab8f79a7249661c36 2024-11-27T16:21:11,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/ad87d89786fd4da895373e96e266e048 is 50, key is test_row_0/C:col10/1732724470751/Put/seqid=0 2024-11-27T16:21:11,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741913_1089 (size=12301) 2024-11-27T16:21:11,822 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/ad87d89786fd4da895373e96e266e048 2024-11-27T16:21:11,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/26ccd1f3460042edb0b90da810a44326 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/26ccd1f3460042edb0b90da810a44326 2024-11-27T16:21:11,835 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/26ccd1f3460042edb0b90da810a44326, entries=150, sequenceid=353, filesize=12.0 K 2024-11-27T16:21:11,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/db07053f421340aab8f79a7249661c36 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/db07053f421340aab8f79a7249661c36 2024-11-27T16:21:11,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-27T16:21:11,846 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/db07053f421340aab8f79a7249661c36, entries=150, sequenceid=353, filesize=12.0 K 2024-11-27T16:21:11,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/ad87d89786fd4da895373e96e266e048 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ad87d89786fd4da895373e96e266e048 2024-11-27T16:21:11,855 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ad87d89786fd4da895373e96e266e048, entries=150, sequenceid=353, filesize=12.0 K 2024-11-27T16:21:11,856 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for d498a187112eb3635082ffac2dfb4cf9 in 499ms, sequenceid=353, compaction requested=false 2024-11-27T16:21:11,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:11,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] 
regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:11,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-27T16:21:11,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-27T16:21:11,860 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-27T16:21:11,860 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1200 sec 2024-11-27T16:21:11,862 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.1260 sec 2024-11-27T16:21:12,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-27T16:21:12,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:12,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:12,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:12,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:12,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:12,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:12,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:12,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/b49f37ea8cb54b78aad63ee64fa2aaf3 is 50, key is test_row_0/A:col10/1732724472022/Put/seqid=0 2024-11-27T16:21:12,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741914_1090 (size=12301) 2024-11-27T16:21:12,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:12,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724532095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:12,127 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/b90fd464dcb7442f946599f1b97363fd as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/b90fd464dcb7442f946599f1b97363fd 2024-11-27T16:21:12,133 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/C of d498a187112eb3635082ffac2dfb4cf9 into b90fd464dcb7442f946599f1b97363fd(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:12,133 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:12,133 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/C, priority=13, startTime=1732724471239; duration=0sec 2024-11-27T16:21:12,134 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:12,134 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:C 2024-11-27T16:21:12,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:12,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724532198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:12,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:12,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724532403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:12,446 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/b49f37ea8cb54b78aad63ee64fa2aaf3 2024-11-27T16:21:12,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/719b6d44a8e443c48f271e7bd8494e66 is 50, key is test_row_0/B:col10/1732724472022/Put/seqid=0 2024-11-27T16:21:12,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741915_1091 (size=12301) 2024-11-27T16:21:12,464 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/719b6d44a8e443c48f271e7bd8494e66 2024-11-27T16:21:12,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/18c5c97905b7451c9bfcc25a9993859f is 50, key is test_row_0/C:col10/1732724472022/Put/seqid=0 2024-11-27T16:21:12,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741916_1092 (size=12301) 2024-11-27T16:21:12,501 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/18c5c97905b7451c9bfcc25a9993859f 2024-11-27T16:21:12,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/b49f37ea8cb54b78aad63ee64fa2aaf3 as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b49f37ea8cb54b78aad63ee64fa2aaf3 2024-11-27T16:21:12,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b49f37ea8cb54b78aad63ee64fa2aaf3, entries=150, sequenceid=370, filesize=12.0 K 2024-11-27T16:21:12,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/719b6d44a8e443c48f271e7bd8494e66 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/719b6d44a8e443c48f271e7bd8494e66 2024-11-27T16:21:12,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/719b6d44a8e443c48f271e7bd8494e66, entries=150, sequenceid=370, filesize=12.0 K 2024-11-27T16:21:12,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/18c5c97905b7451c9bfcc25a9993859f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/18c5c97905b7451c9bfcc25a9993859f 2024-11-27T16:21:12,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/18c5c97905b7451c9bfcc25a9993859f, entries=150, sequenceid=370, filesize=12.0 K 2024-11-27T16:21:12,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for d498a187112eb3635082ffac2dfb4cf9 in 510ms, sequenceid=370, compaction requested=true 2024-11-27T16:21:12,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:12,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:12,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:12,536 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:12,536 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:12,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:B, priority=-2147483648, current under compaction 
store size is 2 2024-11-27T16:21:12,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:12,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:12,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:12,538 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:12,538 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/A is initiating minor compaction (all files) 2024-11-27T16:21:12,539 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/A in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:12,539 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/1ab6737aff264112993b90fe7db66374, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/26ccd1f3460042edb0b90da810a44326, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b49f37ea8cb54b78aad63ee64fa2aaf3] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=36.8 K 2024-11-27T16:21:12,539 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:12,539 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/B is initiating minor compaction (all files) 2024-11-27T16:21:12,539 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/B in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
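The flush that just completed ("Finished flush of dataSize ~80.51 KB ... compaction requested=true") was driven by memstore pressure via MemStoreFlusher, while the earlier pid=22/23 FlushTableProcedure/FlushRegionProcedure entries came from an explicit client-requested flush. A minimal sketch, assuming the standard Admin API, of issuing that kind of explicit table flush; the class name and structure are illustrative only.

    // Illustrative sketch (not part of the captured log): an explicit table flush like
    // the one the test client requests, which the master turns into a FlushTableProcedure
    // plus per-region FlushRegionProcedure subprocedures, as seen in the log above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Waits for the master-side procedure to finish, matching the
          // "Operation: FLUSH, Table Name: default:TestAcidGuarantees ... completed" line.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }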
2024-11-27T16:21:12,539 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/2f125909aabe4f6587c2585c5fb9f28d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/db07053f421340aab8f79a7249661c36, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/719b6d44a8e443c48f271e7bd8494e66] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=36.8 K 2024-11-27T16:21:12,540 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ab6737aff264112993b90fe7db66374, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732724469910 2024-11-27T16:21:12,540 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f125909aabe4f6587c2585c5fb9f28d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732724469910 2024-11-27T16:21:12,541 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26ccd1f3460042edb0b90da810a44326, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732724470751 2024-11-27T16:21:12,541 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting db07053f421340aab8f79a7249661c36, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732724470751 2024-11-27T16:21:12,541 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b49f37ea8cb54b78aad63ee64fa2aaf3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732724471395 2024-11-27T16:21:12,541 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 719b6d44a8e443c48f271e7bd8494e66, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732724471395 2024-11-27T16:21:12,556 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#A#compaction#79 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:12,556 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#B#compaction#78 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:12,557 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/c52c567c854e46008710452ce175cf36 is 50, key is test_row_0/A:col10/1732724472022/Put/seqid=0 2024-11-27T16:21:12,557 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/85c2aa6d18284e38a5911ee89ddaf616 is 50, key is test_row_0/B:col10/1732724472022/Put/seqid=0 2024-11-27T16:21:12,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741917_1093 (size=13187) 2024-11-27T16:21:12,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741918_1094 (size=13187) 2024-11-27T16:21:12,601 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/85c2aa6d18284e38a5911ee89ddaf616 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/85c2aa6d18284e38a5911ee89ddaf616 2024-11-27T16:21:12,610 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/B of d498a187112eb3635082ffac2dfb4cf9 into 85c2aa6d18284e38a5911ee89ddaf616(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
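The PressureAwareThroughputController lines ("average throughput is 6.55 MB/second ... total limit is 50.00 MB/second") show compaction I/O being throttled between a lower and an upper bound that scale with store-file pressure. A hedged sketch of the relevant knobs, assuming the stock key names; the byte values used as defaults below are assumptions consistent with the 50 MB/second limit reported while pressure is low.

    // Illustrative sketch (not part of the captured log): the bounds consulted by the
    // pressure-aware compaction throughput controller seen in the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputBounds {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // With low store-file pressure the controller throttles near the lower bound
        // (50 MB/s in this run); as pressure rises the limit scales toward the higher bound.
        long lower = conf.getLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        long higher = conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);

        System.out.println("compaction throughput bounds: "
            + (lower >> 20) + " MB/s .. " + (higher >> 20) + " MB/s");
      }
    }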
2024-11-27T16:21:12,610 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:12,610 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/B, priority=13, startTime=1732724472536; duration=0sec 2024-11-27T16:21:12,610 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:12,610 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:B 2024-11-27T16:21:12,610 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:12,612 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:12,612 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/C is initiating minor compaction (all files) 2024-11-27T16:21:12,612 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/C in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:12,613 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/b90fd464dcb7442f946599f1b97363fd, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ad87d89786fd4da895373e96e266e048, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/18c5c97905b7451c9bfcc25a9993859f] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=36.8 K 2024-11-27T16:21:12,614 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b90fd464dcb7442f946599f1b97363fd, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732724469910 2024-11-27T16:21:12,614 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting ad87d89786fd4da895373e96e266e048, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732724470751 2024-11-27T16:21:12,615 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 18c5c97905b7451c9bfcc25a9993859f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732724471395 2024-11-27T16:21:12,624 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d498a187112eb3635082ffac2dfb4cf9#C#compaction#80 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:12,625 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/1679c4b09c454e179e0b4009a57e239a is 50, key is test_row_0/C:col10/1732724472022/Put/seqid=0 2024-11-27T16:21:12,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741919_1095 (size=13187) 2024-11-27T16:21:12,647 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/1679c4b09c454e179e0b4009a57e239a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/1679c4b09c454e179e0b4009a57e239a 2024-11-27T16:21:12,658 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/C of d498a187112eb3635082ffac2dfb4cf9 into 1679c4b09c454e179e0b4009a57e239a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:12,658 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:12,658 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/C, priority=13, startTime=1732724472536; duration=0sec 2024-11-27T16:21:12,658 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:12,658 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:C 2024-11-27T16:21:12,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:12,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-27T16:21:12,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:12,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:12,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:12,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:12,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO 
DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:12,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:12,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/11026ccaed304892b69dbd5b940e2798 is 50, key is test_row_0/A:col10/1732724472705/Put/seqid=0 2024-11-27T16:21:12,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741920_1096 (size=14741) 2024-11-27T16:21:12,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:12,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724532737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:12,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-27T16:21:12,840 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-27T16:21:12,842 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:12,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:12,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724532840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:12,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-27T16:21:12,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-27T16:21:12,844 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:12,845 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:12,845 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:12,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-27T16:21:12,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:12,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724532944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:12,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:12,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724532945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:12,948 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:21:12,949 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:21:12,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:12,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724532960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:12,961 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:21:12,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:12,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724532961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:12,962 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4157 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:21:12,995 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/c52c567c854e46008710452ce175cf36 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/c52c567c854e46008710452ce175cf36 2024-11-27T16:21:12,998 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:12,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-27T16:21:12,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:12,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:12,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:12,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:12,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:13,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:13,005 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/A of d498a187112eb3635082ffac2dfb4cf9 into c52c567c854e46008710452ce175cf36(size=12.9 K), total size for store is 12.9 K. 
This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T16:21:13,005 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9:
2024-11-27T16:21:13,005 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/A, priority=13, startTime=1732724472536; duration=0sec
2024-11-27T16:21:13,005 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T16:21:13,005 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:A
2024-11-27T16:21:13,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:21:13,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724533044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967
2024-11-27T16:21:13,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/11026ccaed304892b69dbd5b940e2798
2024-11-27T16:21:13,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/315b5a4e7dea4b519cc8b042ad98c254 is 50, key is test_row_0/B:col10/1732724472705/Put/seqid=0
2024-11-27T16:21:13,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741921_1097 (size=12301)
2024-11-27T16:21:13,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24
2024-11-27T16:21:13,152 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967
2024-11-27T16:21:13,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25
2024-11-27T16:21:13,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.
2024-11-27T16:21:13,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing
2024-11-27T16:21:13,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.
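[Editor's note] The repeated RegionTooBusyException entries above are the region server's write throttle: HRegion.checkResources rejects each Mutate once the region's memstore passes its blocking limit (here 512.0 K), and the client's RpcRetryingCallerImpl backs off and retries, which is what the "tries=6, retries=16, started=... ms ago" DEBUG lines record. The following is a minimal client-side sketch of what the AcidGuaranteesTestTool writer threads are effectively doing; table, row, family, and qualifier names are taken from the log, while the class name, cell value, configuration values, and error handling are illustrative assumptions rather than the test's actual code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ThrottledPutSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Client retry budget; the DEBUG lines above show "tries=6, retries=16",
    // i.e. the caller is still inside this budget while the region is blocked.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100L); // base backoff between retries, in ms (illustrative)

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      // Family "A" and qualifier "col10" appear in the flush/compaction entries above.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // Internally this goes through RpcRetryingCallerImpl, so a
        // RegionTooBusyException from the server is retried with backoff and
        // only surfaces here once the retry budget is exhausted.
        table.put(put);
      } catch (IOException retriesExhausted) {
        // Typically a RetriesExhaustedException wrapping the last
        // RegionTooBusyException; back off further or reduce write pressure.
        System.err.println("put gave up: " + retriesExhausted.getMessage());
      }
    }
  }
}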
2024-11-27T16:21:13,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:13,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:13,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:13,306 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:13,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-27T16:21:13,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:13,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:13,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:13,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:13,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:13,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:13,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:13,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724533347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:13,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-27T16:21:13,461 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:13,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-27T16:21:13,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:13,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:13,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:13,462 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
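Editor's sketch (not part of the log): the RegionTooBusyException returned to the writer above (callId 228) appears once the region's memstore passes its blocking limit; with default settings the HBase client retries such calls internally with back-off, and the exception may surface wrapped in a RetriesExhaustedException rather than directly. The snippet below only illustrates the equivalent manual handling; the table, row, column family, attempt count, and sleep times are illustrative assumptions, not values taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Illustrative back-off loop; the stock client normally performs these
      // retries itself before the caller ever sees the busy-region error.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;                    // give up after a few attempts
          }
          Thread.sleep(100L * attempt); // linear back-off between retries
        }
      }
    }
  }
}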
2024-11-27T16:21:13,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:13,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:13,544 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/315b5a4e7dea4b519cc8b042ad98c254 2024-11-27T16:21:13,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/5d86ec6d915a4ea080340e1f4eac48c5 is 50, key is test_row_0/C:col10/1732724472705/Put/seqid=0 2024-11-27T16:21:13,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741922_1098 (size=12301) 2024-11-27T16:21:13,560 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/5d86ec6d915a4ea080340e1f4eac48c5 2024-11-27T16:21:13,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/11026ccaed304892b69dbd5b940e2798 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/11026ccaed304892b69dbd5b940e2798 2024-11-27T16:21:13,572 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/11026ccaed304892b69dbd5b940e2798, entries=200, sequenceid=395, filesize=14.4 K 2024-11-27T16:21:13,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/315b5a4e7dea4b519cc8b042ad98c254 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/315b5a4e7dea4b519cc8b042ad98c254 2024-11-27T16:21:13,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/315b5a4e7dea4b519cc8b042ad98c254, entries=150, sequenceid=395, filesize=12.0 K 2024-11-27T16:21:13,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/5d86ec6d915a4ea080340e1f4eac48c5 as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/5d86ec6d915a4ea080340e1f4eac48c5 2024-11-27T16:21:13,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/5d86ec6d915a4ea080340e1f4eac48c5, entries=150, sequenceid=395, filesize=12.0 K 2024-11-27T16:21:13,589 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for d498a187112eb3635082ffac2dfb4cf9 in 882ms, sequenceid=395, compaction requested=false 2024-11-27T16:21:13,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:13,615 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:13,615 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-27T16:21:13,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:13,616 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T16:21:13,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:13,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:13,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:13,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:13,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:13,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:13,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/dbb18098580f4dca854dbf25b2bfbd3b is 50, key is test_row_0/A:col10/1732724472726/Put/seqid=0 2024-11-27T16:21:13,637 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741923_1099 (size=12301) 2024-11-27T16:21:13,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:13,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:13,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:13,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724533905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:13,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-27T16:21:14,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:14,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724534007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:14,038 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/dbb18098580f4dca854dbf25b2bfbd3b 2024-11-27T16:21:14,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/f70b1a4c8cc74df782519031ca553d91 is 50, key is test_row_0/B:col10/1732724472726/Put/seqid=0 2024-11-27T16:21:14,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741924_1100 (size=12301) 2024-11-27T16:21:14,060 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/f70b1a4c8cc74df782519031ca553d91 2024-11-27T16:21:14,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/a306b786311d4d1cbdd7c33e601f855e is 50, key is test_row_0/C:col10/1732724472726/Put/seqid=0 2024-11-27T16:21:14,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741925_1101 (size=12301) 2024-11-27T16:21:14,091 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/a306b786311d4d1cbdd7c33e601f855e 2024-11-27T16:21:14,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/dbb18098580f4dca854dbf25b2bfbd3b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/dbb18098580f4dca854dbf25b2bfbd3b 2024-11-27T16:21:14,107 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/dbb18098580f4dca854dbf25b2bfbd3b, entries=150, sequenceid=410, filesize=12.0 K 2024-11-27T16:21:14,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/f70b1a4c8cc74df782519031ca553d91 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f70b1a4c8cc74df782519031ca553d91 2024-11-27T16:21:14,115 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f70b1a4c8cc74df782519031ca553d91, entries=150, sequenceid=410, filesize=12.0 K 2024-11-27T16:21:14,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/a306b786311d4d1cbdd7c33e601f855e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/a306b786311d4d1cbdd7c33e601f855e 2024-11-27T16:21:14,125 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/a306b786311d4d1cbdd7c33e601f855e, entries=150, sequenceid=410, filesize=12.0 K 2024-11-27T16:21:14,126 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d498a187112eb3635082ffac2dfb4cf9 in 510ms, sequenceid=410, compaction requested=true 2024-11-27T16:21:14,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:14,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
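Editor's sketch (not part of the log): the "Over memstore limit=512.0 K" rejections and the flush that just completed are both governed by the per-region memstore blocking limit, which is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier. The values below are hypothetical, chosen only so their product matches the 512 KB limit visible in this log; the actual settings used by the test are not shown here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values: flushSize * multiplier = 512 KB, matching the
    // "Over memstore limit=512.0 K" messages in the surrounding log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // flush at 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes blocked once a region's memstore exceeds "
        + (flushSize * multiplier) / 1024 + " KB");
  }
}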
2024-11-27T16:21:14,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-27T16:21:14,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-27T16:21:14,131 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-27T16:21:14,131 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2830 sec 2024-11-27T16:21:14,133 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.2900 sec 2024-11-27T16:21:14,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:14,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T16:21:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:14,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/8c0e0074f68e4d0a81093f8b6515f4e3 is 50, key is test_row_0/A:col10/1732724474210/Put/seqid=0 2024-11-27T16:21:14,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741926_1102 (size=14741) 2024-11-27T16:21:14,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/8c0e0074f68e4d0a81093f8b6515f4e3 2024-11-27T16:21:14,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/c1fc6526d81c48a9922879d6786ac34b is 50, key is test_row_0/B:col10/1732724474210/Put/seqid=0 2024-11-27T16:21:14,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:14,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724534246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:14,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741927_1103 (size=12301) 2024-11-27T16:21:14,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:14,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724534348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:14,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:14,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724534550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:14,650 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/c1fc6526d81c48a9922879d6786ac34b 2024-11-27T16:21:14,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/180eb584ab99445a8a090eaed2c6117d is 50, key is test_row_0/C:col10/1732724474210/Put/seqid=0 2024-11-27T16:21:14,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741928_1104 (size=12301) 2024-11-27T16:21:14,676 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/180eb584ab99445a8a090eaed2c6117d 2024-11-27T16:21:14,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/8c0e0074f68e4d0a81093f8b6515f4e3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8c0e0074f68e4d0a81093f8b6515f4e3 2024-11-27T16:21:14,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8c0e0074f68e4d0a81093f8b6515f4e3, entries=200, sequenceid=433, filesize=14.4 K 2024-11-27T16:21:14,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/c1fc6526d81c48a9922879d6786ac34b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c1fc6526d81c48a9922879d6786ac34b 2024-11-27T16:21:14,700 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c1fc6526d81c48a9922879d6786ac34b, entries=150, sequenceid=433, filesize=12.0 K 2024-11-27T16:21:14,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/180eb584ab99445a8a090eaed2c6117d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/180eb584ab99445a8a090eaed2c6117d 2024-11-27T16:21:14,710 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/180eb584ab99445a8a090eaed2c6117d, entries=150, sequenceid=433, filesize=12.0 K 2024-11-27T16:21:14,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d498a187112eb3635082ffac2dfb4cf9 in 500ms, sequenceid=433, compaction requested=true 2024-11-27T16:21:14,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:14,712 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:14,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:14,714 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54970 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:14,714 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/A is initiating minor compaction (all files) 2024-11-27T16:21:14,714 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/A in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
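Editor's sketch (not part of the log): the minor compaction that starts here is selected entirely on the region server by ExploringCompactionPolicy once the store has enough eligible files; no client action is involved. For completeness, the snippet below shows how a client could request a compaction and poll its state through the Admin API — the table name is reused from the log, everything else is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionStateExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.compact(table);                             // request a (minor) compaction
      CompactionState state = admin.getCompactionState(table);
      System.out.println("Compaction state for " + table + ": " + state);
    }
  }
}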
2024-11-27T16:21:14,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:14,714 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/c52c567c854e46008710452ce175cf36, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/11026ccaed304892b69dbd5b940e2798, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/dbb18098580f4dca854dbf25b2bfbd3b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8c0e0074f68e4d0a81093f8b6515f4e3] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=53.7 K 2024-11-27T16:21:14,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:14,714 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:14,715 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c52c567c854e46008710452ce175cf36, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732724471395 2024-11-27T16:21:14,715 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11026ccaed304892b69dbd5b940e2798, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732724472080 2024-11-27T16:21:14,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:14,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:14,716 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbb18098580f4dca854dbf25b2bfbd3b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732724472726 2024-11-27T16:21:14,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:14,716 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c0e0074f68e4d0a81093f8b6515f4e3, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732724473888 2024-11-27T16:21:14,717 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 
files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:14,717 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/B is initiating minor compaction (all files) 2024-11-27T16:21:14,717 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/B in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:14,717 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/85c2aa6d18284e38a5911ee89ddaf616, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/315b5a4e7dea4b519cc8b042ad98c254, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f70b1a4c8cc74df782519031ca553d91, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c1fc6526d81c48a9922879d6786ac34b] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=48.9 K 2024-11-27T16:21:14,718 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 85c2aa6d18284e38a5911ee89ddaf616, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732724471395 2024-11-27T16:21:14,718 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 315b5a4e7dea4b519cc8b042ad98c254, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732724472080 2024-11-27T16:21:14,719 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f70b1a4c8cc74df782519031ca553d91, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732724472726 2024-11-27T16:21:14,719 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting c1fc6526d81c48a9922879d6786ac34b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732724473888 2024-11-27T16:21:14,738 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#A#compaction#90 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:14,738 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/30339113df074e55b9b819a2aea5d318 is 50, key is test_row_0/A:col10/1732724474210/Put/seqid=0 2024-11-27T16:21:14,746 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#B#compaction#91 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:14,746 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/2f0df801ee0c4735aacfe87e30ba2189 is 50, key is test_row_0/B:col10/1732724474210/Put/seqid=0 2024-11-27T16:21:14,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741930_1106 (size=13323) 2024-11-27T16:21:14,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741929_1105 (size=13323) 2024-11-27T16:21:14,764 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/2f0df801ee0c4735aacfe87e30ba2189 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/2f0df801ee0c4735aacfe87e30ba2189 2024-11-27T16:21:14,773 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/30339113df074e55b9b819a2aea5d318 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/30339113df074e55b9b819a2aea5d318 2024-11-27T16:21:14,775 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/B of d498a187112eb3635082ffac2dfb4cf9 into 2f0df801ee0c4735aacfe87e30ba2189(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
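Editor's sketch (not part of the log): the "average throughput ... total limit is 50.00 MB/second" lines come from the pressure-aware compaction throughput controller, which throttles compaction writes between a lower and an upper bound. The sketch below assumes the usual hbase.hstore.compaction.throughput.* keys and uses made-up bounds; the 50 MB/s figure in the log is simply whatever bound was in effect for this run, not a value taken from this configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed tuning knobs for the pressure-aware controller; values are illustrative.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    System.out.println("Compaction throughput bounded between "
        + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", 0) / (1024 * 1024)
        + " and "
        + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0) / (1024 * 1024)
        + " MB/s");
  }
}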
2024-11-27T16:21:14,775 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:14,775 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/B, priority=12, startTime=1732724474714; duration=0sec 2024-11-27T16:21:14,775 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:14,775 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:B 2024-11-27T16:21:14,776 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:14,778 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:14,778 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/C is initiating minor compaction (all files) 2024-11-27T16:21:14,778 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/C in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:14,778 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/1679c4b09c454e179e0b4009a57e239a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/5d86ec6d915a4ea080340e1f4eac48c5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/a306b786311d4d1cbdd7c33e601f855e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/180eb584ab99445a8a090eaed2c6117d] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=48.9 K 2024-11-27T16:21:14,779 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1679c4b09c454e179e0b4009a57e239a, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732724471395 2024-11-27T16:21:14,780 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d86ec6d915a4ea080340e1f4eac48c5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732724472080 2024-11-27T16:21:14,780 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a306b786311d4d1cbdd7c33e601f855e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=410, earliestPutTs=1732724472726 2024-11-27T16:21:14,781 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/A of d498a187112eb3635082ffac2dfb4cf9 into 30339113df074e55b9b819a2aea5d318(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:14,781 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:14,781 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/A, priority=12, startTime=1732724474712; duration=0sec 2024-11-27T16:21:14,781 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:14,781 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:A 2024-11-27T16:21:14,781 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 180eb584ab99445a8a090eaed2c6117d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732724473888 2024-11-27T16:21:14,793 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#C#compaction#92 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:14,794 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/86e8ba91d9594b7fb1d0fc19ed3e8bcf is 50, key is test_row_0/C:col10/1732724474210/Put/seqid=0 2024-11-27T16:21:14,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741931_1107 (size=13323) 2024-11-27T16:21:14,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:14,855 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T16:21:14,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:14,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:14,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:14,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:14,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:14,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:14,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/fb9ba5cc02d943d7b70e96abebf7e2e5 is 50, key is test_row_0/A:col10/1732724474245/Put/seqid=0 2024-11-27T16:21:14,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741932_1108 (size=14741) 2024-11-27T16:21:14,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:14,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724534921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:14,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-27T16:21:14,949 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-27T16:21:14,950 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:14,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-27T16:21:14,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-27T16:21:14,953 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:14,953 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:14,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:15,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:15,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724535025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:15,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-27T16:21:15,105 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:15,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-27T16:21:15,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:15,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:15,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:15,106 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:15,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:15,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:15,212 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/86e8ba91d9594b7fb1d0fc19ed3e8bcf as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/86e8ba91d9594b7fb1d0fc19ed3e8bcf 2024-11-27T16:21:15,220 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/C of d498a187112eb3635082ffac2dfb4cf9 into 86e8ba91d9594b7fb1d0fc19ed3e8bcf(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:15,220 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:15,221 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/C, priority=12, startTime=1732724474716; duration=0sec 2024-11-27T16:21:15,221 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:15,221 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:C 2024-11-27T16:21:15,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:15,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 294 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724535228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:15,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-27T16:21:15,259 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:15,259 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-27T16:21:15,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:15,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:15,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:15,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:15,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:15,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:15,289 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/fb9ba5cc02d943d7b70e96abebf7e2e5 2024-11-27T16:21:15,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/d3888138403f436688e154ea819dad26 is 50, key is test_row_0/B:col10/1732724474245/Put/seqid=0 2024-11-27T16:21:15,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741933_1109 (size=12301) 2024-11-27T16:21:15,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/d3888138403f436688e154ea819dad26 2024-11-27T16:21:15,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/916e7fc2df234ef898122edf189559f2 is 50, key is test_row_0/C:col10/1732724474245/Put/seqid=0 2024-11-27T16:21:15,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741934_1110 (size=12301) 2024-11-27T16:21:15,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/916e7fc2df234ef898122edf189559f2 2024-11-27T16:21:15,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/fb9ba5cc02d943d7b70e96abebf7e2e5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/fb9ba5cc02d943d7b70e96abebf7e2e5 2024-11-27T16:21:15,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/fb9ba5cc02d943d7b70e96abebf7e2e5, entries=200, sequenceid=449, filesize=14.4 K 2024-11-27T16:21:15,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/d3888138403f436688e154ea819dad26 as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d3888138403f436688e154ea819dad26 2024-11-27T16:21:15,359 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d3888138403f436688e154ea819dad26, entries=150, sequenceid=449, filesize=12.0 K 2024-11-27T16:21:15,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/916e7fc2df234ef898122edf189559f2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/916e7fc2df234ef898122edf189559f2 2024-11-27T16:21:15,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/916e7fc2df234ef898122edf189559f2, entries=150, sequenceid=449, filesize=12.0 K 2024-11-27T16:21:15,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d498a187112eb3635082ffac2dfb4cf9 in 521ms, sequenceid=449, compaction requested=false 2024-11-27T16:21:15,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:15,412 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:15,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-27T16:21:15,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:15,414 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-27T16:21:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:15,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/ede8d8bfb67245e781f4b91b091e5879 is 50, key is test_row_0/A:col10/1732724474898/Put/seqid=0 2024-11-27T16:21:15,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741935_1111 (size=12301) 2024-11-27T16:21:15,428 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/ede8d8bfb67245e781f4b91b091e5879 2024-11-27T16:21:15,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/292cb4f3584f41fc83ca5f316d718469 is 50, key is test_row_0/B:col10/1732724474898/Put/seqid=0 2024-11-27T16:21:15,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741936_1112 (size=12301) 2024-11-27T16:21:15,446 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=472 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/292cb4f3584f41fc83ca5f316d718469 2024-11-27T16:21:15,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/c543db472ff3490594ea25d73f2a28bf is 50, key is test_row_0/C:col10/1732724474898/Put/seqid=0 2024-11-27T16:21:15,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741937_1113 (size=12301) 2024-11-27T16:21:15,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:15,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-27T16:21:15,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:15,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 307 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724535588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:15,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:15,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 309 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724535692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:15,870 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/c543db472ff3490594ea25d73f2a28bf 2024-11-27T16:21:15,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/ede8d8bfb67245e781f4b91b091e5879 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/ede8d8bfb67245e781f4b91b091e5879 2024-11-27T16:21:15,886 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/ede8d8bfb67245e781f4b91b091e5879, entries=150, sequenceid=472, filesize=12.0 K 2024-11-27T16:21:15,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/292cb4f3584f41fc83ca5f316d718469 as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/292cb4f3584f41fc83ca5f316d718469 2024-11-27T16:21:15,893 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/292cb4f3584f41fc83ca5f316d718469, entries=150, sequenceid=472, filesize=12.0 K 2024-11-27T16:21:15,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/c543db472ff3490594ea25d73f2a28bf as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/c543db472ff3490594ea25d73f2a28bf 2024-11-27T16:21:15,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:15,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 311 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724535896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:15,899 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/c543db472ff3490594ea25d73f2a28bf, entries=150, sequenceid=472, filesize=12.0 K 2024-11-27T16:21:15,900 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for d498a187112eb3635082ffac2dfb4cf9 in 486ms, sequenceid=472, compaction requested=true 2024-11-27T16:21:15,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:15,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:15,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-27T16:21:15,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-27T16:21:15,903 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-27T16:21:15,903 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 948 msec 2024-11-27T16:21:15,905 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 954 msec 2024-11-27T16:21:16,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-27T16:21:16,056 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-27T16:21:16,058 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:16,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-27T16:21:16,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-27T16:21:16,060 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:16,060 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:16,061 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:16,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-27T16:21:16,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:16,201 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-27T16:21:16,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:16,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:16,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, 
store=B 2024-11-27T16:21:16,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:16,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:16,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:16,208 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/6b6de4a134e448bdb9996e6b0fde1097 is 50, key is test_row_0/A:col10/1732724475587/Put/seqid=0 2024-11-27T16:21:16,212 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:16,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-27T16:21:16,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:16,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:16,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:16,213 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:16,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741938_1114 (size=14741) 2024-11-27T16:21:16,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:16,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 332 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724536247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:16,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 334 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724536349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:16,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-27T16:21:16,370 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:16,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-27T16:21:16,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:16,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:16,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:16,371 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:16,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,523 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:16,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-27T16:21:16,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:16,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:16,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:16,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:16,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 336 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724536552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:16,621 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/6b6de4a134e448bdb9996e6b0fde1097 2024-11-27T16:21:16,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/4658ce64c863427f829e2e4389cc26e2 is 50, key is test_row_0/B:col10/1732724475587/Put/seqid=0 2024-11-27T16:21:16,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741939_1115 (size=12301) 2024-11-27T16:21:16,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-27T16:21:16,677 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:16,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-27T16:21:16,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:16,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:16,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
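[Editor's note, not part of the captured log] Around this point the MemStoreFlusher has written a .tmp HFile for family A while the master re-dispatches FlushRegionCallable pid=29, which the region server refuses because the region is already flushing. For orientation only, a small sketch of the user-facing equivalent of that master-driven flush via the Admin API; the table name is taken from the log and the rest is assumed.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table. Internally this drives a
      // flush procedure against the region servers, much like pid=28/29 in the log;
      // a region that is already flushing causes the remote callable to be retried.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```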
2024-11-27T16:21:16,678 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,831 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:16,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-27T16:21:16,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:16,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:16,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:16,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:16,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 338 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724536855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:16,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:16,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41120 deadline: 1732724536952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:16,954 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:21:16,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:16,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41106 deadline: 1732724536963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:16,967 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8161 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:21:16,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:16,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1732724536969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:16,972 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:21:16,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:16,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41130 deadline: 1732724536978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:16,981 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8176 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:21:16,984 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:16,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-27T16:21:16,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:16,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:16,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:17,048 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/4658ce64c863427f829e2e4389cc26e2 2024-11-27T16:21:17,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/4ba7c66f45d6479ba964dba50be9456d is 50, key is test_row_0/C:col10/1732724475587/Put/seqid=0 2024-11-27T16:21:17,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741940_1116 (size=12301) 2024-11-27T16:21:17,137 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:17,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-27T16:21:17,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:17,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:17,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:17,138 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:17,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:17,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:17,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-27T16:21:17,290 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:17,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-27T16:21:17,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:17,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:17,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:17,291 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:17,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:17,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:17,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:17,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 340 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724537361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:17,444 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:17,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-27T16:21:17,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:17,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:17,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:17,445 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:17,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:17,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:17,477 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/4ba7c66f45d6479ba964dba50be9456d 2024-11-27T16:21:17,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/6b6de4a134e448bdb9996e6b0fde1097 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/6b6de4a134e448bdb9996e6b0fde1097 2024-11-27T16:21:17,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/6b6de4a134e448bdb9996e6b0fde1097, entries=200, sequenceid=487, filesize=14.4 K 2024-11-27T16:21:17,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/4658ce64c863427f829e2e4389cc26e2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/4658ce64c863427f829e2e4389cc26e2 2024-11-27T16:21:17,496 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/4658ce64c863427f829e2e4389cc26e2, entries=150, 
sequenceid=487, filesize=12.0 K 2024-11-27T16:21:17,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/4ba7c66f45d6479ba964dba50be9456d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/4ba7c66f45d6479ba964dba50be9456d 2024-11-27T16:21:17,507 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/4ba7c66f45d6479ba964dba50be9456d, entries=150, sequenceid=487, filesize=12.0 K 2024-11-27T16:21:17,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for d498a187112eb3635082ffac2dfb4cf9 in 1307ms, sequenceid=487, compaction requested=true 2024-11-27T16:21:17,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:17,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:17,508 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:17,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:17,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:17,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:17,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:17,508 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:17,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:17,510 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:17,510 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55106 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:17,510 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] 
regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/A is initiating minor compaction (all files) 2024-11-27T16:21:17,510 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/B is initiating minor compaction (all files) 2024-11-27T16:21:17,510 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/A in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:17,510 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/B in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:17,510 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/30339113df074e55b9b819a2aea5d318, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/fb9ba5cc02d943d7b70e96abebf7e2e5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/ede8d8bfb67245e781f4b91b091e5879, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/6b6de4a134e448bdb9996e6b0fde1097] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=53.8 K 2024-11-27T16:21:17,510 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/2f0df801ee0c4735aacfe87e30ba2189, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d3888138403f436688e154ea819dad26, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/292cb4f3584f41fc83ca5f316d718469, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/4658ce64c863427f829e2e4389cc26e2] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=49.0 K 2024-11-27T16:21:17,511 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30339113df074e55b9b819a2aea5d318, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732724473888 2024-11-27T16:21:17,511 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb9ba5cc02d943d7b70e96abebf7e2e5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732724474218 2024-11-27T16:21:17,511 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f0df801ee0c4735aacfe87e30ba2189, keycount=150, 
bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732724473888 2024-11-27T16:21:17,512 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d3888138403f436688e154ea819dad26, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732724474226 2024-11-27T16:21:17,512 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ede8d8bfb67245e781f4b91b091e5879, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732724474898 2024-11-27T16:21:17,512 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b6de4a134e448bdb9996e6b0fde1097, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1732724475531 2024-11-27T16:21:17,512 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 292cb4f3584f41fc83ca5f316d718469, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732724474898 2024-11-27T16:21:17,513 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4658ce64c863427f829e2e4389cc26e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1732724475584 2024-11-27T16:21:17,523 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#B#compaction#102 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:17,524 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/97489c4435174c56a26afc51575de07b is 50, key is test_row_0/B:col10/1732724475587/Put/seqid=0 2024-11-27T16:21:17,526 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#A#compaction#103 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:17,527 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/41d279b4b4ab43508ba4bff8d9cfe3e0 is 50, key is test_row_0/A:col10/1732724475587/Put/seqid=0 2024-11-27T16:21:17,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741941_1117 (size=13459) 2024-11-27T16:21:17,537 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/97489c4435174c56a26afc51575de07b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/97489c4435174c56a26afc51575de07b 2024-11-27T16:21:17,543 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/B of d498a187112eb3635082ffac2dfb4cf9 into 97489c4435174c56a26afc51575de07b(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:17,543 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:17,543 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/B, priority=12, startTime=1732724477508; duration=0sec 2024-11-27T16:21:17,543 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:17,543 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:B 2024-11-27T16:21:17,544 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:17,546 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:17,546 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/C is initiating minor compaction (all files) 2024-11-27T16:21:17,546 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/C in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:17,546 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/86e8ba91d9594b7fb1d0fc19ed3e8bcf, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/916e7fc2df234ef898122edf189559f2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/c543db472ff3490594ea25d73f2a28bf, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/4ba7c66f45d6479ba964dba50be9456d] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=49.0 K 2024-11-27T16:21:17,547 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 86e8ba91d9594b7fb1d0fc19ed3e8bcf, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732724473888 2024-11-27T16:21:17,548 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 916e7fc2df234ef898122edf189559f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732724474226 2024-11-27T16:21:17,549 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting c543db472ff3490594ea25d73f2a28bf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732724474898 2024-11-27T16:21:17,549 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ba7c66f45d6479ba964dba50be9456d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1732724475584 2024-11-27T16:21:17,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741942_1118 (size=13459) 2024-11-27T16:21:17,563 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/41d279b4b4ab43508ba4bff8d9cfe3e0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/41d279b4b4ab43508ba4bff8d9cfe3e0 2024-11-27T16:21:17,569 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#C#compaction#104 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:17,570 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/666d8f62e2154ae38c220121153551cb is 50, key is test_row_0/C:col10/1732724475587/Put/seqid=0 2024-11-27T16:21:17,574 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/A of d498a187112eb3635082ffac2dfb4cf9 into 41d279b4b4ab43508ba4bff8d9cfe3e0(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:17,574 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:17,574 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/A, priority=12, startTime=1732724477508; duration=0sec 2024-11-27T16:21:17,574 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:17,574 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:A 2024-11-27T16:21:17,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741943_1119 (size=13459) 2024-11-27T16:21:17,597 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:17,598 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-27T16:21:17,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:17,599 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-27T16:21:17,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:17,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:17,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:17,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:17,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:17,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:17,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/095647230e3849498284b50af952c723 is 50, key is test_row_0/A:col10/1732724476245/Put/seqid=0 2024-11-27T16:21:17,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741944_1120 (size=12301) 2024-11-27T16:21:17,614 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/095647230e3849498284b50af952c723 2024-11-27T16:21:17,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/b4a2a0a048774ccaa8011fbb6d5fbef7 is 50, key is test_row_0/B:col10/1732724476245/Put/seqid=0 2024-11-27T16:21:17,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741945_1121 (size=12301) 2024-11-27T16:21:18,001 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/666d8f62e2154ae38c220121153551cb as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/666d8f62e2154ae38c220121153551cb 2024-11-27T16:21:18,007 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/C of d498a187112eb3635082ffac2dfb4cf9 into 666d8f62e2154ae38c220121153551cb(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:18,007 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:18,008 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/C, priority=12, startTime=1732724477508; duration=0sec 2024-11-27T16:21:18,008 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:18,008 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:C 2024-11-27T16:21:18,031 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/b4a2a0a048774ccaa8011fbb6d5fbef7 2024-11-27T16:21:18,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/12c1431e083a4c7cbf75f7556d6a909b is 50, key is test_row_0/C:col10/1732724476245/Put/seqid=0 2024-11-27T16:21:18,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741946_1122 (size=12301) 2024-11-27T16:21:18,045 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/12c1431e083a4c7cbf75f7556d6a909b 2024-11-27T16:21:18,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/095647230e3849498284b50af952c723 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/095647230e3849498284b50af952c723 2024-11-27T16:21:18,056 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/095647230e3849498284b50af952c723, entries=150, sequenceid=510, filesize=12.0 K 2024-11-27T16:21:18,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/b4a2a0a048774ccaa8011fbb6d5fbef7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/b4a2a0a048774ccaa8011fbb6d5fbef7 2024-11-27T16:21:18,063 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/b4a2a0a048774ccaa8011fbb6d5fbef7, entries=150, sequenceid=510, filesize=12.0 K 2024-11-27T16:21:18,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/12c1431e083a4c7cbf75f7556d6a909b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/12c1431e083a4c7cbf75f7556d6a909b 2024-11-27T16:21:18,072 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/12c1431e083a4c7cbf75f7556d6a909b, entries=150, sequenceid=510, filesize=12.0 K 2024-11-27T16:21:18,074 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=0 B/0 for d498a187112eb3635082ffac2dfb4cf9 in 476ms, sequenceid=510, compaction requested=false 2024-11-27T16:21:18,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:18,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:18,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-27T16:21:18,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-27T16:21:18,080 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-27T16:21:18,080 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0150 sec 2024-11-27T16:21:18,082 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 2.0230 sec 2024-11-27T16:21:18,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-27T16:21:18,166 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-27T16:21:18,169 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:18,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-27T16:21:18,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-27T16:21:18,171 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:18,172 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:18,172 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:18,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-27T16:21:18,324 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:18,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-27T16:21:18,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
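The pid=28 and pid=30 procedures above were both driven by a client flushing the table (logged as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal equivalent using the public client API is sketched below; the class name is an illustrative assumption, the table name comes from the log, and on this build the call is served by a FlushTableProcedure with one FlushRegionProcedure child per region, as the PEWorker lines show.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side flush procedure finishes, which is what the
          // HBaseAdmin$TableFuture "Operation: FLUSH ... completed" line reports above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }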
2024-11-27T16:21:18,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:18,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:18,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-27T16:21:18,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-27T16:21:18,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-27T16:21:18,329 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 154 msec 2024-11-27T16:21:18,330 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 160 msec 2024-11-27T16:21:18,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:18,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:21:18,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:18,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:18,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:18,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:18,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:18,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:18,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/e9b4a71a259f43a380479e12d2daa704 is 50, key is test_row_0/A:col10/1732724478373/Put/seqid=0 2024-11-27T16:21:18,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741947_1123 (size=14741) 2024-11-27T16:21:18,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:18,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 372 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724538448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:18,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-27T16:21:18,473 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-27T16:21:18,475 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:18,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-11-27T16:21:18,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-27T16:21:18,477 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:18,478 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:18,478 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:18,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:18,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 374 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724538551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:18,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-27T16:21:18,629 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:18,630 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-27T16:21:18,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:18,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:18,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:18,630 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:18,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:18,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:18,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:18,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 376 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724538755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:18,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-27T16:21:18,784 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:18,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-27T16:21:18,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:18,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:18,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:18,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
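The RegionTooBusyException warnings above mean HRegion.checkResources rejected a put because the region's memstore was over its 512 K blocking limit while a flush was still running. The stock client treats this as a retriable error and backs off on its own (governed by hbase.client.retries.number and hbase.client.pause), so application code normally does not need to handle it; the sketch below only illustrates an explicit application-level backoff. The class name, retry count, backoff values and cell value are assumptions; the row, family and qualifier are taken from the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RetriesExhaustedException;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 10; attempt++) {
            try {
              table.put(put);
              break;                         // write accepted
            } catch (RegionTooBusyException | RetriesExhaustedException busy) {
              // The server refuses writes until the memstore drains; the client may also
              // surface the busy condition wrapped after its own retries are exhausted.
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
        }
      }
    }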
2024-11-27T16:21:18,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:18,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:18,817 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/e9b4a71a259f43a380479e12d2daa704 2024-11-27T16:21:18,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/d6a09247514e4833987c58b0b73dcff5 is 50, key is test_row_0/B:col10/1732724478373/Put/seqid=0 2024-11-27T16:21:18,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741948_1124 (size=12301) 2024-11-27T16:21:18,937 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:18,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-27T16:21:18,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:18,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:18,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:18,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:18,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:18,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:18,946 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6ebb9f30 to 127.0.0.1:51088 2024-11-27T16:21:18,946 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:21:18,947 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x426bcd11 to 127.0.0.1:51088 2024-11-27T16:21:18,947 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:21:18,948 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4f34c0b8 to 127.0.0.1:51088 2024-11-27T16:21:18,948 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:21:18,949 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f18a09d to 127.0.0.1:51088 2024-11-27T16:21:18,949 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:21:19,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:19,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 378 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724539060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:19,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-27T16:21:19,090 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:19,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-27T16:21:19,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:19,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:19,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:19,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:19,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:19,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:19,241 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/d6a09247514e4833987c58b0b73dcff5 2024-11-27T16:21:19,243 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:19,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-27T16:21:19,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:19,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:19,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:19,244 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:19,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:19,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:19,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/ffb4700954904678b4ee27e914b244ef is 50, key is test_row_0/C:col10/1732724478373/Put/seqid=0 2024-11-27T16:21:19,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741949_1125 (size=12301) 2024-11-27T16:21:19,396 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:19,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-27T16:21:19,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:19,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:19,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:19,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:19,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:19,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:19,549 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:19,550 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-27T16:21:19,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:19,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. as already flushing 2024-11-27T16:21:19,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:19,550 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:19,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:19,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:19,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:19,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 380 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41100 deadline: 1732724539565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:19,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-27T16:21:19,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/ffb4700954904678b4ee27e914b244ef 2024-11-27T16:21:19,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/e9b4a71a259f43a380479e12d2daa704 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e9b4a71a259f43a380479e12d2daa704 2024-11-27T16:21:19,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e9b4a71a259f43a380479e12d2daa704, entries=200, sequenceid=523, filesize=14.4 K 2024-11-27T16:21:19,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/d6a09247514e4833987c58b0b73dcff5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d6a09247514e4833987c58b0b73dcff5 2024-11-27T16:21:19,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d6a09247514e4833987c58b0b73dcff5, entries=150, sequenceid=523, filesize=12.0 K 2024-11-27T16:21:19,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/ffb4700954904678b4ee27e914b244ef as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ffb4700954904678b4ee27e914b244ef 2024-11-27T16:21:19,675 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ffb4700954904678b4ee27e914b244ef, entries=150, sequenceid=523, filesize=12.0 K 2024-11-27T16:21:19,676 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d498a187112eb3635082ffac2dfb4cf9 in 1296ms, sequenceid=523, compaction requested=true 2024-11-27T16:21:19,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:19,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:19,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:19,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:19,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:19,676 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:19,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d498a187112eb3635082ffac2dfb4cf9:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:19,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:19,676 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:19,677 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40501 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:19,677 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:19,678 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/B is initiating minor compaction (all files) 2024-11-27T16:21:19,678 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/A is initiating minor compaction (all files) 2024-11-27T16:21:19,678 INFO 
[RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/B in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:19,678 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/A in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:19,678 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/97489c4435174c56a26afc51575de07b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/b4a2a0a048774ccaa8011fbb6d5fbef7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d6a09247514e4833987c58b0b73dcff5] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=37.2 K 2024-11-27T16:21:19,678 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/41d279b4b4ab43508ba4bff8d9cfe3e0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/095647230e3849498284b50af952c723, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e9b4a71a259f43a380479e12d2daa704] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=39.6 K 2024-11-27T16:21:19,678 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 97489c4435174c56a26afc51575de07b, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1732724475584 2024-11-27T16:21:19,678 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41d279b4b4ab43508ba4bff8d9cfe3e0, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1732724475584 2024-11-27T16:21:19,679 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 095647230e3849498284b50af952c723, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=510, earliestPutTs=1732724476225 2024-11-27T16:21:19,679 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b4a2a0a048774ccaa8011fbb6d5fbef7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=510, earliestPutTs=1732724476225 2024-11-27T16:21:19,679 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9b4a71a259f43a380479e12d2daa704, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1732724478373 2024-11-27T16:21:19,679 DEBUG 
[RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d6a09247514e4833987c58b0b73dcff5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1732724478373 2024-11-27T16:21:19,687 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#B#compaction#111 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:19,688 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/a6bb9cbfd79945b1a812abe1c04f7172 is 50, key is test_row_0/B:col10/1732724478373/Put/seqid=0 2024-11-27T16:21:19,688 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#A#compaction#112 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:19,689 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/5214e147759d43a58cbd245aebdf4f36 is 50, key is test_row_0/A:col10/1732724478373/Put/seqid=0 2024-11-27T16:21:19,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741950_1126 (size=13561) 2024-11-27T16:21:19,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741951_1127 (size=13561) 2024-11-27T16:21:19,702 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/5214e147759d43a58cbd245aebdf4f36 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/5214e147759d43a58cbd245aebdf4f36 2024-11-27T16:21:19,702 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:19,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-27T16:21:19,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:19,703 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:21:19,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:19,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:19,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:19,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:19,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:19,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:19,707 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/A of d498a187112eb3635082ffac2dfb4cf9 into 5214e147759d43a58cbd245aebdf4f36(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:19,707 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:19,707 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/A, priority=13, startTime=1732724479676; duration=0sec 2024-11-27T16:21:19,707 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:19,708 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:A 2024-11-27T16:21:19,708 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:19,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/38e4fd569b3648e081b908631b5a9348 is 50, key is test_row_0/A:col10/1732724478446/Put/seqid=0 2024-11-27T16:21:19,709 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:19,709 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): d498a187112eb3635082ffac2dfb4cf9/C is initiating minor compaction (all files) 2024-11-27T16:21:19,709 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d498a187112eb3635082ffac2dfb4cf9/C in TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:19,709 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/666d8f62e2154ae38c220121153551cb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/12c1431e083a4c7cbf75f7556d6a909b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ffb4700954904678b4ee27e914b244ef] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp, totalSize=37.2 K 2024-11-27T16:21:19,710 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 666d8f62e2154ae38c220121153551cb, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1732724475584 2024-11-27T16:21:19,710 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12c1431e083a4c7cbf75f7556d6a909b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=510, earliestPutTs=1732724476225 2024-11-27T16:21:19,710 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ffb4700954904678b4ee27e914b244ef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1732724478373 2024-11-27T16:21:19,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741952_1128 (size=12301) 2024-11-27T16:21:19,719 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d498a187112eb3635082ffac2dfb4cf9#C#compaction#114 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:19,720 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/77dfe2d26d7947c9af1b74b91b990506 is 50, key is test_row_0/C:col10/1732724478373/Put/seqid=0 2024-11-27T16:21:19,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741953_1129 (size=13561) 2024-11-27T16:21:20,101 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/a6bb9cbfd79945b1a812abe1c04f7172 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/a6bb9cbfd79945b1a812abe1c04f7172 2024-11-27T16:21:20,107 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/B of d498a187112eb3635082ffac2dfb4cf9 into a6bb9cbfd79945b1a812abe1c04f7172(size=13.2 K), total size for store is 13.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:20,107 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:20,107 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/B, priority=13, startTime=1732724479676; duration=0sec 2024-11-27T16:21:20,107 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:20,107 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:B 2024-11-27T16:21:20,114 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=548 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/38e4fd569b3648e081b908631b5a9348 2024-11-27T16:21:20,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/c7e80b0fbecf40e1a0ccdce342db9d99 is 50, key is test_row_0/B:col10/1732724478446/Put/seqid=0 2024-11-27T16:21:20,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741954_1130 (size=12301) 2024-11-27T16:21:20,129 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/77dfe2d26d7947c9af1b74b91b990506 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/77dfe2d26d7947c9af1b74b91b990506 2024-11-27T16:21:20,134 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d498a187112eb3635082ffac2dfb4cf9/C of d498a187112eb3635082ffac2dfb4cf9 into 77dfe2d26d7947c9af1b74b91b990506(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:20,134 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:20,134 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9., storeName=d498a187112eb3635082ffac2dfb4cf9/C, priority=13, startTime=1732724479676; duration=0sec 2024-11-27T16:21:20,135 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:20,135 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d498a187112eb3635082ffac2dfb4cf9:C 2024-11-27T16:21:20,526 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=548 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/c7e80b0fbecf40e1a0ccdce342db9d99 2024-11-27T16:21:20,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/9a150d9d742e4ff78e7a66bd60dccd4b is 50, key is test_row_0/C:col10/1732724478446/Put/seqid=0 2024-11-27T16:21:20,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741955_1131 (size=12301) 2024-11-27T16:21:20,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:20,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
as already flushing 2024-11-27T16:21:20,571 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d888e3e to 127.0.0.1:51088 2024-11-27T16:21:20,572 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:21:20,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-27T16:21:20,939 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=548 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/9a150d9d742e4ff78e7a66bd60dccd4b 2024-11-27T16:21:20,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/38e4fd569b3648e081b908631b5a9348 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/38e4fd569b3648e081b908631b5a9348 2024-11-27T16:21:20,948 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/38e4fd569b3648e081b908631b5a9348, entries=150, sequenceid=548, filesize=12.0 K 2024-11-27T16:21:20,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/c7e80b0fbecf40e1a0ccdce342db9d99 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c7e80b0fbecf40e1a0ccdce342db9d99 2024-11-27T16:21:20,953 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c7e80b0fbecf40e1a0ccdce342db9d99, entries=150, sequenceid=548, filesize=12.0 K 2024-11-27T16:21:20,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/9a150d9d742e4ff78e7a66bd60dccd4b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/9a150d9d742e4ff78e7a66bd60dccd4b 2024-11-27T16:21:20,957 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/9a150d9d742e4ff78e7a66bd60dccd4b, entries=150, sequenceid=548, filesize=12.0 K 2024-11-27T16:21:20,958 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=6.71 KB/6870 for d498a187112eb3635082ffac2dfb4cf9 in 1255ms, sequenceid=548, compaction requested=false 2024-11-27T16:21:20,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:20,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:20,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-27T16:21:20,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-27T16:21:20,960 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-27T16:21:20,961 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4810 sec 2024-11-27T16:21:20,962 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 2.4860 sec 2024-11-27T16:21:21,291 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-27T16:21:22,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-27T16:21:22,587 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-27T16:21:27,024 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x24512372 to 127.0.0.1:51088 2024-11-27T16:21:27,024 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:21:27,037 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c9b2c1d to 127.0.0.1:51088 2024-11-27T16:21:27,037 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:21:27,057 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53623ce6 to 127.0.0.1:51088 2024-11-27T16:21:27,057 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:21:27,064 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x66d523ff to 127.0.0.1:51088 2024-11-27T16:21:27,064 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:21:27,064 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-27T16:21:27,065 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48 2024-11-27T16:21:27,065 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 47 2024-11-27T16:21:27,065 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 47 2024-11-27T16:21:27,065 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50 2024-11-27T16:21:27,065 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 244 2024-11-27T16:21:27,065 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-27T16:21:27,065 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5910 2024-11-27T16:21:27,065 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5917 2024-11-27T16:21:27,065 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-27T16:21:27,065 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2663 2024-11-27T16:21:27,065 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7989 rows 2024-11-27T16:21:27,065 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2638 2024-11-27T16:21:27,065 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7914 rows 2024-11-27T16:21:27,065 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-27T16:21:27,065 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x02a08c5a to 127.0.0.1:51088 2024-11-27T16:21:27,065 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:21:27,071 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-27T16:21:27,075 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-27T16:21:27,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-27T16:21:27,084 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724487084"}]},"ts":"1732724487084"} 2024-11-27T16:21:27,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-27T16:21:27,085 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-27T16:21:27,087 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-27T16:21:27,089 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T16:21:27,093 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d498a187112eb3635082ffac2dfb4cf9, UNASSIGN}] 2024-11-27T16:21:27,093 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=d498a187112eb3635082ffac2dfb4cf9, UNASSIGN 2024-11-27T16:21:27,094 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=d498a187112eb3635082ffac2dfb4cf9, regionState=CLOSING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:21:27,095 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T16:21:27,096 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; CloseRegionProcedure d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:21:27,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-27T16:21:27,251 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:27,253 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(124): Close d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:27,253 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T16:21:27,254 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1681): Closing d498a187112eb3635082ffac2dfb4cf9, disabling compactions & flushes 2024-11-27T16:21:27,254 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:27,254 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:27,254 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. after waiting 0 ms 2024-11-27T16:21:27,254 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 
2024-11-27T16:21:27,254 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(2837): Flushing d498a187112eb3635082ffac2dfb4cf9 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-27T16:21:27,254 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=A 2024-11-27T16:21:27,254 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:27,254 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=B 2024-11-27T16:21:27,254 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:27,254 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d498a187112eb3635082ffac2dfb4cf9, store=C 2024-11-27T16:21:27,254 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:27,259 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/50ea73f1f49f451d9a463f4f105df391 is 50, key is test_row_1/A:col10/1732724487056/Put/seqid=0 2024-11-27T16:21:27,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741956_1132 (size=9857) 2024-11-27T16:21:27,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-27T16:21:27,666 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=559 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/50ea73f1f49f451d9a463f4f105df391 2024-11-27T16:21:27,675 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/46d45471c15c49b1be3c970a59f24acc is 50, key is test_row_1/B:col10/1732724487056/Put/seqid=0 2024-11-27T16:21:27,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741957_1133 (size=9857) 2024-11-27T16:21:27,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-27T16:21:28,080 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 
{event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=559 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/46d45471c15c49b1be3c970a59f24acc 2024-11-27T16:21:28,087 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/ff5d307711224e0d9a098717db1a5228 is 50, key is test_row_1/C:col10/1732724487056/Put/seqid=0 2024-11-27T16:21:28,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741958_1134 (size=9857) 2024-11-27T16:21:28,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-27T16:21:28,492 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=559 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/ff5d307711224e0d9a098717db1a5228 2024-11-27T16:21:28,496 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/A/50ea73f1f49f451d9a463f4f105df391 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/50ea73f1f49f451d9a463f4f105df391 2024-11-27T16:21:28,500 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/50ea73f1f49f451d9a463f4f105df391, entries=100, sequenceid=559, filesize=9.6 K 2024-11-27T16:21:28,501 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/B/46d45471c15c49b1be3c970a59f24acc as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/46d45471c15c49b1be3c970a59f24acc 2024-11-27T16:21:28,505 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/46d45471c15c49b1be3c970a59f24acc, entries=100, sequenceid=559, filesize=9.6 K 2024-11-27T16:21:28,506 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/.tmp/C/ff5d307711224e0d9a098717db1a5228 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ff5d307711224e0d9a098717db1a5228 2024-11-27T16:21:28,510 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ff5d307711224e0d9a098717db1a5228, entries=100, sequenceid=559, filesize=9.6 K 2024-11-27T16:21:28,511 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for d498a187112eb3635082ffac2dfb4cf9 in 1257ms, sequenceid=559, compaction requested=true 2024-11-27T16:21:28,512 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/4ad0db3684374730be5ea5fbdfe679a1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/712c1f3117ab46179edabc79dacbd358, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/619602cf0e7c4a50991dd3b94d0515e6, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e8c8fe1362c24e8aaf1e7ec1a25bcb1a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/5f59276818374ce68a5bebb41171ea9b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/4aab2c78ca6046a79ff93d2ce172a3f2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b8efb0547edc40d09f082dfae177e954, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/1108887f283a4f4d91a7c954ed7db869, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8d2ed70f414a4d9a9b7b99cff3ed306e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/32069e77767e417489e0b765c7359f12, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e14183462e9948eaa9b02f92420e4273, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/111a3867c2e04e438968114192cca81f, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/3c7df496ef5b49d7a39271bee7e00519, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b7493f62cc1b4d1784057d9aadf636dd, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/fb96e196b2fb4b47947f090b4865de76, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/edf40a837cbb42b9896ad8ba256f8aad, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/856076fddf7e400ea994f60dda84a73c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/60dff7215c8f4493b288d92dacaa258e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/df5f4366b63d48f28cc29b867927b3c0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8c5c05810c324c34a589cfc0e3d2716a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/15a7ef453bec4bb5a201c81ff09c8a17, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/20033b0a91f1489fb5842d5f0158c835, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/1ab6737aff264112993b90fe7db66374, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/88a89e647af54b01bca51190f1d2b51d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/26ccd1f3460042edb0b90da810a44326, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/c52c567c854e46008710452ce175cf36, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b49f37ea8cb54b78aad63ee64fa2aaf3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/11026ccaed304892b69dbd5b940e2798, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/dbb18098580f4dca854dbf25b2bfbd3b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8c0e0074f68e4d0a81093f8b6515f4e3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/30339113df074e55b9b819a2aea5d318, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/fb9ba5cc02d943d7b70e96abebf7e2e5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/ede8d8bfb67245e781f4b91b091e5879, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/6b6de4a134e448bdb9996e6b0fde1097, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/41d279b4b4ab43508ba4bff8d9cfe3e0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/095647230e3849498284b50af952c723, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e9b4a71a259f43a380479e12d2daa704] to archive 2024-11-27T16:21:28,515 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T16:21:28,520 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/4ad0db3684374730be5ea5fbdfe679a1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/4ad0db3684374730be5ea5fbdfe679a1 2024-11-27T16:21:28,522 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/712c1f3117ab46179edabc79dacbd358 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/712c1f3117ab46179edabc79dacbd358 2024-11-27T16:21:28,523 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/619602cf0e7c4a50991dd3b94d0515e6 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/619602cf0e7c4a50991dd3b94d0515e6 2024-11-27T16:21:28,525 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e8c8fe1362c24e8aaf1e7ec1a25bcb1a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e8c8fe1362c24e8aaf1e7ec1a25bcb1a 2024-11-27T16:21:28,526 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/5f59276818374ce68a5bebb41171ea9b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/5f59276818374ce68a5bebb41171ea9b 2024-11-27T16:21:28,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/4aab2c78ca6046a79ff93d2ce172a3f2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/4aab2c78ca6046a79ff93d2ce172a3f2 2024-11-27T16:21:28,529 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b8efb0547edc40d09f082dfae177e954 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b8efb0547edc40d09f082dfae177e954 2024-11-27T16:21:28,531 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/1108887f283a4f4d91a7c954ed7db869 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/1108887f283a4f4d91a7c954ed7db869 2024-11-27T16:21:28,532 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8d2ed70f414a4d9a9b7b99cff3ed306e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8d2ed70f414a4d9a9b7b99cff3ed306e 2024-11-27T16:21:28,533 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/32069e77767e417489e0b765c7359f12 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/32069e77767e417489e0b765c7359f12 2024-11-27T16:21:28,535 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e14183462e9948eaa9b02f92420e4273 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e14183462e9948eaa9b02f92420e4273 2024-11-27T16:21:28,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/111a3867c2e04e438968114192cca81f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/111a3867c2e04e438968114192cca81f 2024-11-27T16:21:28,537 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/3c7df496ef5b49d7a39271bee7e00519 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/3c7df496ef5b49d7a39271bee7e00519 2024-11-27T16:21:28,538 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b7493f62cc1b4d1784057d9aadf636dd to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b7493f62cc1b4d1784057d9aadf636dd 2024-11-27T16:21:28,539 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/fb96e196b2fb4b47947f090b4865de76 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/fb96e196b2fb4b47947f090b4865de76 2024-11-27T16:21:28,541 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/edf40a837cbb42b9896ad8ba256f8aad to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/edf40a837cbb42b9896ad8ba256f8aad 2024-11-27T16:21:28,542 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/856076fddf7e400ea994f60dda84a73c to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/856076fddf7e400ea994f60dda84a73c 2024-11-27T16:21:28,544 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/60dff7215c8f4493b288d92dacaa258e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/60dff7215c8f4493b288d92dacaa258e 2024-11-27T16:21:28,545 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/df5f4366b63d48f28cc29b867927b3c0 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/df5f4366b63d48f28cc29b867927b3c0 2024-11-27T16:21:28,547 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8c5c05810c324c34a589cfc0e3d2716a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8c5c05810c324c34a589cfc0e3d2716a 2024-11-27T16:21:28,548 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/15a7ef453bec4bb5a201c81ff09c8a17 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/15a7ef453bec4bb5a201c81ff09c8a17 2024-11-27T16:21:28,549 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/20033b0a91f1489fb5842d5f0158c835 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/20033b0a91f1489fb5842d5f0158c835 2024-11-27T16:21:28,550 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/1ab6737aff264112993b90fe7db66374 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/1ab6737aff264112993b90fe7db66374 2024-11-27T16:21:28,552 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/88a89e647af54b01bca51190f1d2b51d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/88a89e647af54b01bca51190f1d2b51d 2024-11-27T16:21:28,553 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/26ccd1f3460042edb0b90da810a44326 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/26ccd1f3460042edb0b90da810a44326 2024-11-27T16:21:28,554 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/c52c567c854e46008710452ce175cf36 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/c52c567c854e46008710452ce175cf36 2024-11-27T16:21:28,555 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b49f37ea8cb54b78aad63ee64fa2aaf3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/b49f37ea8cb54b78aad63ee64fa2aaf3 2024-11-27T16:21:28,556 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/11026ccaed304892b69dbd5b940e2798 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/11026ccaed304892b69dbd5b940e2798 2024-11-27T16:21:28,558 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/dbb18098580f4dca854dbf25b2bfbd3b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/dbb18098580f4dca854dbf25b2bfbd3b 2024-11-27T16:21:28,559 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8c0e0074f68e4d0a81093f8b6515f4e3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/8c0e0074f68e4d0a81093f8b6515f4e3 2024-11-27T16:21:28,560 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/30339113df074e55b9b819a2aea5d318 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/30339113df074e55b9b819a2aea5d318 2024-11-27T16:21:28,561 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/fb9ba5cc02d943d7b70e96abebf7e2e5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/fb9ba5cc02d943d7b70e96abebf7e2e5 2024-11-27T16:21:28,562 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/ede8d8bfb67245e781f4b91b091e5879 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/ede8d8bfb67245e781f4b91b091e5879 2024-11-27T16:21:28,563 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/6b6de4a134e448bdb9996e6b0fde1097 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/6b6de4a134e448bdb9996e6b0fde1097 2024-11-27T16:21:28,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/41d279b4b4ab43508ba4bff8d9cfe3e0 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/41d279b4b4ab43508ba4bff8d9cfe3e0 2024-11-27T16:21:28,566 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/095647230e3849498284b50af952c723 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/095647230e3849498284b50af952c723 2024-11-27T16:21:28,567 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e9b4a71a259f43a380479e12d2daa704 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/e9b4a71a259f43a380479e12d2daa704 2024-11-27T16:21:28,581 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/12c0e6483cf94472ae4f102788838dde, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d0c4de35809c44a484ab80f9d73bf3ae, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f4638405c4244a5fa4e71fc6974b30d0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/49a95b0a4f3a45e9a54e335e4fd1e317, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c33b6a2f1e4840f394d5f11471ac8a8e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/3154ac50155b493c9e1a293bf3943ec3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/ee8609ee64a2476da21463623559ccc7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/8f657cdacc484061880f654ef4b0754c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/90c57b4d9df6410c9122c0fd597bb69c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/9bb481d48ecd4c39b4757dbebc134751, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/20994f7d06ed40959f61194e939215cd, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d8c1cc8878ab4e40bdb2c42f38b96cb1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/1b106ae95b084f5a9b0a8a97a5fdaacb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/3a3e6740b0c44e4d88aa2671afc29f5b, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/93d8f66040564ff4a5a45b1242126ebb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f694ba9928b241008f5fffd207210b16, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d9662dd7ae8c40758e7366e68e1a0757, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/86fd7b60133a44868b62e9b63fc138c5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f104be8048e743268ca3394568acf0fe, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/1e47e4f1d38f48bfa265f8b0438e35da, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/898aa33bd81a4d1eb4018f2bfe4f3edb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f7ad4c8fde584367b0918c9db0a2263d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/2f125909aabe4f6587c2585c5fb9f28d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/321a12869b5448f0a43b7a68929d5cf4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/db07053f421340aab8f79a7249661c36, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/85c2aa6d18284e38a5911ee89ddaf616, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/719b6d44a8e443c48f271e7bd8494e66, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/315b5a4e7dea4b519cc8b042ad98c254, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f70b1a4c8cc74df782519031ca553d91, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/2f0df801ee0c4735aacfe87e30ba2189, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c1fc6526d81c48a9922879d6786ac34b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d3888138403f436688e154ea819dad26, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/292cb4f3584f41fc83ca5f316d718469, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/97489c4435174c56a26afc51575de07b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/4658ce64c863427f829e2e4389cc26e2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/b4a2a0a048774ccaa8011fbb6d5fbef7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d6a09247514e4833987c58b0b73dcff5] to archive 2024-11-27T16:21:28,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T16:21:28,584 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/12c0e6483cf94472ae4f102788838dde to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/12c0e6483cf94472ae4f102788838dde 2024-11-27T16:21:28,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d0c4de35809c44a484ab80f9d73bf3ae to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d0c4de35809c44a484ab80f9d73bf3ae 2024-11-27T16:21:28,586 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f4638405c4244a5fa4e71fc6974b30d0 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f4638405c4244a5fa4e71fc6974b30d0 2024-11-27T16:21:28,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/49a95b0a4f3a45e9a54e335e4fd1e317 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/49a95b0a4f3a45e9a54e335e4fd1e317 2024-11-27T16:21:28,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c33b6a2f1e4840f394d5f11471ac8a8e to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c33b6a2f1e4840f394d5f11471ac8a8e 2024-11-27T16:21:28,590 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/3154ac50155b493c9e1a293bf3943ec3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/3154ac50155b493c9e1a293bf3943ec3 2024-11-27T16:21:28,591 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/ee8609ee64a2476da21463623559ccc7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/ee8609ee64a2476da21463623559ccc7 2024-11-27T16:21:28,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/8f657cdacc484061880f654ef4b0754c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/8f657cdacc484061880f654ef4b0754c 2024-11-27T16:21:28,593 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/90c57b4d9df6410c9122c0fd597bb69c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/90c57b4d9df6410c9122c0fd597bb69c 2024-11-27T16:21:28,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/9bb481d48ecd4c39b4757dbebc134751 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/9bb481d48ecd4c39b4757dbebc134751 2024-11-27T16:21:28,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/20994f7d06ed40959f61194e939215cd to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/20994f7d06ed40959f61194e939215cd 2024-11-27T16:21:28,597 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d8c1cc8878ab4e40bdb2c42f38b96cb1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d8c1cc8878ab4e40bdb2c42f38b96cb1 2024-11-27T16:21:28,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/1b106ae95b084f5a9b0a8a97a5fdaacb to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/1b106ae95b084f5a9b0a8a97a5fdaacb 2024-11-27T16:21:28,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/3a3e6740b0c44e4d88aa2671afc29f5b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/3a3e6740b0c44e4d88aa2671afc29f5b 2024-11-27T16:21:28,601 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/93d8f66040564ff4a5a45b1242126ebb to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/93d8f66040564ff4a5a45b1242126ebb 2024-11-27T16:21:28,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f694ba9928b241008f5fffd207210b16 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f694ba9928b241008f5fffd207210b16 2024-11-27T16:21:28,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d9662dd7ae8c40758e7366e68e1a0757 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d9662dd7ae8c40758e7366e68e1a0757 2024-11-27T16:21:28,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/86fd7b60133a44868b62e9b63fc138c5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/86fd7b60133a44868b62e9b63fc138c5 2024-11-27T16:21:28,605 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f104be8048e743268ca3394568acf0fe to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f104be8048e743268ca3394568acf0fe 2024-11-27T16:21:28,607 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/1e47e4f1d38f48bfa265f8b0438e35da to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/1e47e4f1d38f48bfa265f8b0438e35da 2024-11-27T16:21:28,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/898aa33bd81a4d1eb4018f2bfe4f3edb to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/898aa33bd81a4d1eb4018f2bfe4f3edb 2024-11-27T16:21:28,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f7ad4c8fde584367b0918c9db0a2263d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f7ad4c8fde584367b0918c9db0a2263d 2024-11-27T16:21:28,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/2f125909aabe4f6587c2585c5fb9f28d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/2f125909aabe4f6587c2585c5fb9f28d 2024-11-27T16:21:28,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/321a12869b5448f0a43b7a68929d5cf4 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/321a12869b5448f0a43b7a68929d5cf4 2024-11-27T16:21:28,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/db07053f421340aab8f79a7249661c36 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/db07053f421340aab8f79a7249661c36 2024-11-27T16:21:28,614 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/85c2aa6d18284e38a5911ee89ddaf616 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/85c2aa6d18284e38a5911ee89ddaf616 2024-11-27T16:21:28,615 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/719b6d44a8e443c48f271e7bd8494e66 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/719b6d44a8e443c48f271e7bd8494e66 2024-11-27T16:21:28,616 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/315b5a4e7dea4b519cc8b042ad98c254 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/315b5a4e7dea4b519cc8b042ad98c254 2024-11-27T16:21:28,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f70b1a4c8cc74df782519031ca553d91 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/f70b1a4c8cc74df782519031ca553d91 2024-11-27T16:21:28,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/2f0df801ee0c4735aacfe87e30ba2189 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/2f0df801ee0c4735aacfe87e30ba2189 2024-11-27T16:21:28,620 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c1fc6526d81c48a9922879d6786ac34b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c1fc6526d81c48a9922879d6786ac34b 2024-11-27T16:21:28,621 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d3888138403f436688e154ea819dad26 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d3888138403f436688e154ea819dad26 2024-11-27T16:21:28,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/292cb4f3584f41fc83ca5f316d718469 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/292cb4f3584f41fc83ca5f316d718469 2024-11-27T16:21:28,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/97489c4435174c56a26afc51575de07b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/97489c4435174c56a26afc51575de07b 2024-11-27T16:21:28,625 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/4658ce64c863427f829e2e4389cc26e2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/4658ce64c863427f829e2e4389cc26e2 2024-11-27T16:21:28,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/b4a2a0a048774ccaa8011fbb6d5fbef7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/b4a2a0a048774ccaa8011fbb6d5fbef7 2024-11-27T16:21:28,627 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d6a09247514e4833987c58b0b73dcff5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/d6a09247514e4833987c58b0b73dcff5 2024-11-27T16:21:28,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/d6b3a5cc797c4096ad6879ded24a04e5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f62c916a86e44395a691a5aa77f3d863, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/e21547f1576a426097b2ea981de51e60, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/36a06d5a83e94c1ea7bb5c7cd76c0c57, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/0dd56f2ddbe54ca8b5597a08be3d354f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/172fbd44c9d94147b760377a210720a4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/5d367d07c29243b79ef715791d9bb333, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f7c8558f74bf43f1bf704ffbebb7bfa2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/73a00bdf6ec0434d99b7ba354bbd58d8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/07526932f8c142dc83aa26e8ea7fe2a3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/99a722ab62554990bbde265a8eb5d0ce, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/052b42f85efd48bbb005a447999dabda, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/156e1da83f914a1e9e418394eab431f0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f8140f3ba7ff429cb2a72fe10adf911f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/6ef4bf9868224957829f335584a52c06, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f4fa77d91b8f4671b3164497de3c4ab9, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/8ac21d2deaf742cfb7bebcd503531d3d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/882f97eb7d134e268b01cd978c05886b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/6cf8ac1cc5f440ee8c5c91b5372bef85, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/8ac00526098745c2aa2df339cc2835fc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/652298c25aec4a0b9f63c2f9c0c99c6a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/cd784bb0ba514a36b63c42ed745070c5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/b90fd464dcb7442f946599f1b97363fd, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/3209dfa5b1e2422c8befc1517b5d7c2c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ad87d89786fd4da895373e96e266e048, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/1679c4b09c454e179e0b4009a57e239a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/18c5c97905b7451c9bfcc25a9993859f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/5d86ec6d915a4ea080340e1f4eac48c5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/a306b786311d4d1cbdd7c33e601f855e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/86e8ba91d9594b7fb1d0fc19ed3e8bcf, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/180eb584ab99445a8a090eaed2c6117d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/916e7fc2df234ef898122edf189559f2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/c543db472ff3490594ea25d73f2a28bf, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/666d8f62e2154ae38c220121153551cb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/4ba7c66f45d6479ba964dba50be9456d, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/12c1431e083a4c7cbf75f7556d6a909b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ffb4700954904678b4ee27e914b244ef] to archive 2024-11-27T16:21:28,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T16:21:28,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/d6b3a5cc797c4096ad6879ded24a04e5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/d6b3a5cc797c4096ad6879ded24a04e5 2024-11-27T16:21:28,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f62c916a86e44395a691a5aa77f3d863 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f62c916a86e44395a691a5aa77f3d863 2024-11-27T16:21:28,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/e21547f1576a426097b2ea981de51e60 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/e21547f1576a426097b2ea981de51e60 2024-11-27T16:21:28,634 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/36a06d5a83e94c1ea7bb5c7cd76c0c57 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/36a06d5a83e94c1ea7bb5c7cd76c0c57 2024-11-27T16:21:28,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/0dd56f2ddbe54ca8b5597a08be3d354f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/0dd56f2ddbe54ca8b5597a08be3d354f 2024-11-27T16:21:28,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/172fbd44c9d94147b760377a210720a4 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/172fbd44c9d94147b760377a210720a4 2024-11-27T16:21:28,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/5d367d07c29243b79ef715791d9bb333 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/5d367d07c29243b79ef715791d9bb333 2024-11-27T16:21:28,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f7c8558f74bf43f1bf704ffbebb7bfa2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f7c8558f74bf43f1bf704ffbebb7bfa2 2024-11-27T16:21:28,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/73a00bdf6ec0434d99b7ba354bbd58d8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/73a00bdf6ec0434d99b7ba354bbd58d8 2024-11-27T16:21:28,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/07526932f8c142dc83aa26e8ea7fe2a3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/07526932f8c142dc83aa26e8ea7fe2a3 2024-11-27T16:21:28,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/99a722ab62554990bbde265a8eb5d0ce to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/99a722ab62554990bbde265a8eb5d0ce 2024-11-27T16:21:28,643 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/052b42f85efd48bbb005a447999dabda to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/052b42f85efd48bbb005a447999dabda 2024-11-27T16:21:28,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/156e1da83f914a1e9e418394eab431f0 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/156e1da83f914a1e9e418394eab431f0 2024-11-27T16:21:28,646 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f8140f3ba7ff429cb2a72fe10adf911f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f8140f3ba7ff429cb2a72fe10adf911f 2024-11-27T16:21:28,647 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/6ef4bf9868224957829f335584a52c06 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/6ef4bf9868224957829f335584a52c06 2024-11-27T16:21:28,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f4fa77d91b8f4671b3164497de3c4ab9 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/f4fa77d91b8f4671b3164497de3c4ab9 2024-11-27T16:21:28,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/8ac21d2deaf742cfb7bebcd503531d3d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/8ac21d2deaf742cfb7bebcd503531d3d 2024-11-27T16:21:28,650 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/882f97eb7d134e268b01cd978c05886b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/882f97eb7d134e268b01cd978c05886b 2024-11-27T16:21:28,652 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/6cf8ac1cc5f440ee8c5c91b5372bef85 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/6cf8ac1cc5f440ee8c5c91b5372bef85 2024-11-27T16:21:28,653 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/8ac00526098745c2aa2df339cc2835fc to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/8ac00526098745c2aa2df339cc2835fc 2024-11-27T16:21:28,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/652298c25aec4a0b9f63c2f9c0c99c6a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/652298c25aec4a0b9f63c2f9c0c99c6a 2024-11-27T16:21:28,655 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/cd784bb0ba514a36b63c42ed745070c5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/cd784bb0ba514a36b63c42ed745070c5 2024-11-27T16:21:28,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/b90fd464dcb7442f946599f1b97363fd to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/b90fd464dcb7442f946599f1b97363fd 2024-11-27T16:21:28,658 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/3209dfa5b1e2422c8befc1517b5d7c2c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/3209dfa5b1e2422c8befc1517b5d7c2c 2024-11-27T16:21:28,660 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ad87d89786fd4da895373e96e266e048 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ad87d89786fd4da895373e96e266e048 2024-11-27T16:21:28,661 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/1679c4b09c454e179e0b4009a57e239a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/1679c4b09c454e179e0b4009a57e239a 2024-11-27T16:21:28,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/18c5c97905b7451c9bfcc25a9993859f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/18c5c97905b7451c9bfcc25a9993859f 2024-11-27T16:21:28,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/5d86ec6d915a4ea080340e1f4eac48c5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/5d86ec6d915a4ea080340e1f4eac48c5 2024-11-27T16:21:28,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/a306b786311d4d1cbdd7c33e601f855e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/a306b786311d4d1cbdd7c33e601f855e 2024-11-27T16:21:28,666 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/86e8ba91d9594b7fb1d0fc19ed3e8bcf to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/86e8ba91d9594b7fb1d0fc19ed3e8bcf 2024-11-27T16:21:28,668 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/180eb584ab99445a8a090eaed2c6117d to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/180eb584ab99445a8a090eaed2c6117d 2024-11-27T16:21:28,669 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/916e7fc2df234ef898122edf189559f2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/916e7fc2df234ef898122edf189559f2 2024-11-27T16:21:28,670 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/c543db472ff3490594ea25d73f2a28bf to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/c543db472ff3490594ea25d73f2a28bf 2024-11-27T16:21:28,672 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/666d8f62e2154ae38c220121153551cb to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/666d8f62e2154ae38c220121153551cb 2024-11-27T16:21:28,673 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/4ba7c66f45d6479ba964dba50be9456d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/4ba7c66f45d6479ba964dba50be9456d 2024-11-27T16:21:28,674 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/12c1431e083a4c7cbf75f7556d6a909b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/12c1431e083a4c7cbf75f7556d6a909b 2024-11-27T16:21:28,676 DEBUG [StoreCloser-TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ffb4700954904678b4ee27e914b244ef to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ffb4700954904678b4ee27e914b244ef 2024-11-27T16:21:28,681 DEBUG 
[RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/recovered.edits/562.seqid, newMaxSeqId=562, maxSeqId=1 2024-11-27T16:21:28,683 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9. 2024-11-27T16:21:28,684 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1635): Region close journal for d498a187112eb3635082ffac2dfb4cf9: 2024-11-27T16:21:28,685 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(170): Closed d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:28,686 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=d498a187112eb3635082ffac2dfb4cf9, regionState=CLOSED 2024-11-27T16:21:28,688 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-27T16:21:28,688 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseRegionProcedure d498a187112eb3635082ffac2dfb4cf9, server=7b191dec6496,44169,1732724452967 in 1.5920 sec 2024-11-27T16:21:28,690 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-11-27T16:21:28,690 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d498a187112eb3635082ffac2dfb4cf9, UNASSIGN in 1.5950 sec 2024-11-27T16:21:28,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-27T16:21:28,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6020 sec 2024-11-27T16:21:28,693 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724488693"}]},"ts":"1732724488693"} 2024-11-27T16:21:28,694 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-27T16:21:28,696 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-27T16:21:28,697 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6180 sec 2024-11-27T16:21:29,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-27T16:21:29,189 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-27T16:21:29,192 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-27T16:21:29,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:21:29,197 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:21:29,198 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=38, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:21:29,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-27T16:21:29,201 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:29,205 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/recovered.edits] 2024-11-27T16:21:29,209 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/38e4fd569b3648e081b908631b5a9348 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/38e4fd569b3648e081b908631b5a9348 2024-11-27T16:21:29,210 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/50ea73f1f49f451d9a463f4f105df391 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/50ea73f1f49f451d9a463f4f105df391 2024-11-27T16:21:29,211 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/5214e147759d43a58cbd245aebdf4f36 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/A/5214e147759d43a58cbd245aebdf4f36 2024-11-27T16:21:29,214 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/46d45471c15c49b1be3c970a59f24acc to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/46d45471c15c49b1be3c970a59f24acc 2024-11-27T16:21:29,216 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/a6bb9cbfd79945b1a812abe1c04f7172 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/a6bb9cbfd79945b1a812abe1c04f7172 2024-11-27T16:21:29,217 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c7e80b0fbecf40e1a0ccdce342db9d99 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/B/c7e80b0fbecf40e1a0ccdce342db9d99 2024-11-27T16:21:29,220 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/77dfe2d26d7947c9af1b74b91b990506 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/77dfe2d26d7947c9af1b74b91b990506 2024-11-27T16:21:29,221 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/9a150d9d742e4ff78e7a66bd60dccd4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/9a150d9d742e4ff78e7a66bd60dccd4b 2024-11-27T16:21:29,222 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ff5d307711224e0d9a098717db1a5228 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/C/ff5d307711224e0d9a098717db1a5228 2024-11-27T16:21:29,225 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/recovered.edits/562.seqid to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9/recovered.edits/562.seqid 2024-11-27T16:21:29,226 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/d498a187112eb3635082ffac2dfb4cf9 2024-11-27T16:21:29,226 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-27T16:21:29,231 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=38, 
state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:21:29,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-27T16:21:29,238 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-27T16:21:29,268 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-27T16:21:29,270 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=38, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:21:29,270 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-27T16:21:29,270 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732724489270"}]},"ts":"9223372036854775807"} 2024-11-27T16:21:29,273 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-27T16:21:29,273 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d498a187112eb3635082ffac2dfb4cf9, NAME => 'TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9.', STARTKEY => '', ENDKEY => ''}] 2024-11-27T16:21:29,273 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-27T16:21:29,273 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732724489273"}]},"ts":"9223372036854775807"} 2024-11-27T16:21:29,276 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-27T16:21:29,278 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=38, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:21:29,279 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 85 msec 2024-11-27T16:21:29,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-27T16:21:29,300 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 38 completed 2024-11-27T16:21:29,312 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=238 (was 219) Potentially hanging thread: RS:0;7b191dec6496:44169-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x52e4f869-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x52e4f869-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x52e4f869-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS_OPEN_REGION-regionserver/7b191dec6496:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x52e4f869-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1458025360_22 at /127.0.0.1:42910 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=322 (was 235) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4539 (was 5086) 2024-11-27T16:21:29,322 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=238, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=322, ProcessCount=11, AvailableMemoryMB=4539 2024-11-27T16:21:29,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
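
The "Potentially hanging thread" entries above are per-thread stack dumps emitted by the test's resource checker between test methods; threads blocked in LinkedBlockingQueue.take or epollWait, as in these traces, are typically idle pool or event-loop threads waiting for work. For illustration only (this is not the HBase ResourceChecker code), a comparable dump can be produced with plain JDK APIs:

    import java.util.Map;

    public class ThreadDumpSketch {
        // Print every live thread with its current stack, similar in spirit
        // to the "Potentially hanging thread" entries logged above.
        public static void dumpThreads() {
            Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
            for (Map.Entry<Thread, StackTraceElement[]> e : stacks.entrySet()) {
                System.out.println("Potentially hanging thread: " + e.getKey().getName());
                for (StackTraceElement frame : e.getValue()) {
                    System.out.println("    " + frame);
                }
            }
        }

        public static void main(String[] args) {
            dumpThreads();
        }
    }
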
2024-11-27T16:21:29,324 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T16:21:29,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-27T16:21:29,327 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T16:21:29,327 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:29,327 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 39 2024-11-27T16:21:29,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-27T16:21:29,328 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T16:21:29,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741959_1135 (size=960) 2024-11-27T16:21:29,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-27T16:21:29,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-27T16:21:29,737 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59 2024-11-27T16:21:29,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741960_1136 (size=53) 2024-11-27T16:21:29,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-27T16:21:30,043 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-27T16:21:30,045 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40910, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-27T16:21:30,144 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:21:30,144 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 0eeb0588edb2caebe71f8272627a699d, disabling compactions & flushes 2024-11-27T16:21:30,144 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:30,144 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:30,144 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. after waiting 0 ms 2024-11-27T16:21:30,144 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:30,144 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
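
The create request logged above ('TestAcidGuarantees' with families A, B and C, VERSIONS => '1', a BASIC compacting memstore and a 131072-byte flush size) corresponds roughly to the Java Admin API call sketched below. This is a hedged illustration, not the test's actual code; the configuration source and connection handling are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder table =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                        // per-table metadata seen in the log: BASIC compacting memstore
                        .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
                        // 131072-byte flush size, which triggers the MEMSTORE_FLUSHSIZE warning above
                        .setMemStoreFlushSize(128 * 1024);
                for (String family : new String[] {"A", "B", "C"}) {
                    table.setColumnFamily(
                        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                            .setMaxVersions(1)                   // VERSIONS => '1'
                            .build());
                }
                admin.createTable(table.build());                // blocks until the CreateTableProcedure finishes
            }
        }
    }

Because admin.createTable blocks until the procedure completes, the client keeps polling the master, which is what the repeated "Checking to see if procedure is done pid=39" entries reflect.
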
2024-11-27T16:21:30,144 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:30,146 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T16:21:30,146 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732724490146"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732724490146"}]},"ts":"1732724490146"} 2024-11-27T16:21:30,148 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-27T16:21:30,148 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T16:21:30,149 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724490148"}]},"ts":"1732724490148"} 2024-11-27T16:21:30,150 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-27T16:21:30,154 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0eeb0588edb2caebe71f8272627a699d, ASSIGN}] 2024-11-27T16:21:30,155 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0eeb0588edb2caebe71f8272627a699d, ASSIGN 2024-11-27T16:21:30,155 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=0eeb0588edb2caebe71f8272627a699d, ASSIGN; state=OFFLINE, location=7b191dec6496,44169,1732724452967; forceNewPlan=false, retain=false 2024-11-27T16:21:30,306 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=0eeb0588edb2caebe71f8272627a699d, regionState=OPENING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:21:30,307 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; OpenRegionProcedure 0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:21:30,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-27T16:21:30,459 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:30,462 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:30,463 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7285): Opening region: {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} 2024-11-27T16:21:30,463 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:30,463 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:21:30,463 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7327): checking encryption for 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:30,463 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7330): checking classloading for 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:30,464 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:30,466 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:21:30,466 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eeb0588edb2caebe71f8272627a699d columnFamilyName A 2024-11-27T16:21:30,466 DEBUG [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:30,467 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.HStore(327): Store=0eeb0588edb2caebe71f8272627a699d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:21:30,467 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:30,468 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:21:30,469 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eeb0588edb2caebe71f8272627a699d columnFamilyName B 2024-11-27T16:21:30,469 DEBUG [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:30,470 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.HStore(327): Store=0eeb0588edb2caebe71f8272627a699d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:21:30,470 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:30,471 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:21:30,471 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eeb0588edb2caebe71f8272627a699d columnFamilyName C 2024-11-27T16:21:30,471 DEBUG [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:30,472 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.HStore(327): Store=0eeb0588edb2caebe71f8272627a699d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:21:30,472 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:30,472 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:30,473 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:30,474 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T16:21:30,475 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1085): writing seq id for 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:30,477 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T16:21:30,478 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1102): Opened 0eeb0588edb2caebe71f8272627a699d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61346140, jitterRate=-0.08587127923965454}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T16:21:30,478 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1001): Region open journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:30,479 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., pid=41, masterSystemTime=1732724490459 2024-11-27T16:21:30,481 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:30,481 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
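
The region open above is the last step before the CreateTableProcedure (pid=39) reports completion just below. A client wanting to confirm the resulting table state could use calls along these lines (a minimal sketch that reuses an Admin handle as in the earlier create-table sketch):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class VerifyTableSketch {
        // Assumes an existing Admin handle (see the create-table sketch above).
        static void verifyTable(Admin admin) throws Exception {
            TableName name = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(name) && admin.isTableEnabled(name)) {
                TableDescriptor td = admin.getDescriptor(name);
                System.out.println("column families: " + td.getColumnFamilyNames().size()); // expect 3 (A, B, C)
            }
        }
    }
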
2024-11-27T16:21:30,481 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=0eeb0588edb2caebe71f8272627a699d, regionState=OPEN, openSeqNum=2, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:21:30,484 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-27T16:21:30,484 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; OpenRegionProcedure 0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 in 176 msec 2024-11-27T16:21:30,486 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-11-27T16:21:30,486 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0eeb0588edb2caebe71f8272627a699d, ASSIGN in 330 msec 2024-11-27T16:21:30,486 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T16:21:30,486 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724490486"}]},"ts":"1732724490486"} 2024-11-27T16:21:30,488 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-27T16:21:30,490 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T16:21:30,491 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1660 sec 2024-11-27T16:21:31,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-27T16:21:31,433 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-11-27T16:21:31,435 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x415dec94 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7fde36e8 2024-11-27T16:21:31,439 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f48093f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:21:31,441 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:21:31,443 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39158, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:21:31,445 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T16:21:31,446 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40916, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T16:21:31,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-27T16:21:31,452 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T16:21:31,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=42, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-27T16:21:31,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741961_1137 (size=996) 2024-11-27T16:21:31,876 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-27T16:21:31,876 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-27T16:21:31,880 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T16:21:31,888 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0eeb0588edb2caebe71f8272627a699d, REOPEN/MOVE}] 2024-11-27T16:21:31,889 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0eeb0588edb2caebe71f8272627a699d, REOPEN/MOVE 2024-11-27T16:21:31,890 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=0eeb0588edb2caebe71f8272627a699d, regionState=CLOSING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:21:31,891 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T16:21:31,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE; CloseRegionProcedure 0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:21:32,043 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,043 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(124): Close 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:32,043 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T16:21:32,044 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1681): Closing 0eeb0588edb2caebe71f8272627a699d, disabling compactions & flushes 2024-11-27T16:21:32,044 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:32,044 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:32,044 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. after waiting 0 ms 2024-11-27T16:21:32,044 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
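
The modify request logged above turns family A into a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), and the ReopenTableRegionsProcedure then closes and reopens the region to pick up the new descriptor. From the client side the change corresponds roughly to this sketch (an illustration, not the test's own code; the Admin handle is assumed to exist already):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobSketch {
        // Assumes an existing Admin handle (see the earlier connection sketch).
        static void enableMobOnFamilyA(Admin admin) throws Exception {
            TableName name = TableName.valueOf("TestAcidGuarantees");
            TableDescriptor current = admin.getDescriptor(name);
            TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
                .modifyColumnFamily(
                    ColumnFamilyDescriptorBuilder
                        .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                        .setMobEnabled(true)     // IS_MOB => 'true'
                        .setMobThreshold(4L)     // MOB_THRESHOLD => '4'
                        .build())
                .build();
            admin.modifyTable(modified);          // drives the ModifyTable and reopen procedures logged here
        }
    }

Like createTable, admin.modifyTable blocks while pid=42 and its reopen subprocedures run, hence the later "Checking to see if procedure is done pid=42" entry.
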
2024-11-27T16:21:32,048 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-27T16:21:32,048 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:32,048 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1635): Region close journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:32,048 WARN [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegionServer(3786): Not adding moved region record: 0eeb0588edb2caebe71f8272627a699d to self. 2024-11-27T16:21:32,050 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(170): Closed 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:32,050 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=0eeb0588edb2caebe71f8272627a699d, regionState=CLOSED 2024-11-27T16:21:32,053 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-11-27T16:21:32,053 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; CloseRegionProcedure 0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 in 160 msec 2024-11-27T16:21:32,053 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=0eeb0588edb2caebe71f8272627a699d, REOPEN/MOVE; state=CLOSED, location=7b191dec6496,44169,1732724452967; forceNewPlan=false, retain=true 2024-11-27T16:21:32,204 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=0eeb0588edb2caebe71f8272627a699d, regionState=OPENING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=44, state=RUNNABLE; OpenRegionProcedure 0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:21:32,357 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,361 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:32,361 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7285): Opening region: {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} 2024-11-27T16:21:32,361 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:32,361 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:21:32,362 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7327): checking encryption for 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:32,362 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7330): checking classloading for 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:32,366 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:32,367 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:21:32,371 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eeb0588edb2caebe71f8272627a699d columnFamilyName A 2024-11-27T16:21:32,373 DEBUG [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:32,374 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.HStore(327): Store=0eeb0588edb2caebe71f8272627a699d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:21:32,374 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:32,375 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:21:32,375 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eeb0588edb2caebe71f8272627a699d columnFamilyName B 2024-11-27T16:21:32,375 DEBUG [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:32,376 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.HStore(327): Store=0eeb0588edb2caebe71f8272627a699d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:21:32,376 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:32,376 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:21:32,377 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0eeb0588edb2caebe71f8272627a699d columnFamilyName C 2024-11-27T16:21:32,377 DEBUG [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:32,377 INFO [StoreOpener-0eeb0588edb2caebe71f8272627a699d-1 {}] regionserver.HStore(327): Store=0eeb0588edb2caebe71f8272627a699d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:21:32,377 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:32,378 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:32,379 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:32,380 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T16:21:32,381 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1085): writing seq id for 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:32,382 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1102): Opened 0eeb0588edb2caebe71f8272627a699d; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59558354, jitterRate=-0.11251136660575867}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T16:21:32,384 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1001): Region open journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:32,384 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., pid=46, masterSystemTime=1732724492357 2024-11-27T16:21:32,386 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:32,386 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
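
With the region reopened at openSeqNum=5, the test opens a batch of client connections (the ReadOnlyZKClient entries below) and begins writing; the later flush of 3/3 column families (~53 KB) is consistent with single Puts that touch A, B and C together, followed by a master-driven flush. A hedged sketch of that client-side pattern, with made-up row, qualifier and value constants:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AtomicWriteSketch {
        // Assumes an existing Connection (see the earlier connection sketch).
        static void writeAndFlush(Connection conn) throws Exception {
            TableName name = TableName.valueOf("TestAcidGuarantees");
            try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
                byte[] row = Bytes.toBytes("test_row_0");   // hypothetical row key
                Put put = new Put(row);
                for (String family : new String[] {"A", "B", "C"}) {
                    // one cell per family; a single Put is applied to the row atomically
                    put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col0"), Bytes.toBytes("value"));
                }
                table.put(put);
                admin.flush(name);   // corresponds to the FlushTableProcedure (pid=47) below
            }
        }
    }

Because all three cells travel in one Put, HBase applies them to the row atomically across families, which is the property the ACID-guarantees test exercises.
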
2024-11-27T16:21:32,386 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=0eeb0588edb2caebe71f8272627a699d, regionState=OPEN, openSeqNum=5, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,389 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=44 2024-11-27T16:21:32,389 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=44, state=SUCCESS; OpenRegionProcedure 0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 in 182 msec 2024-11-27T16:21:32,390 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-27T16:21:32,391 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0eeb0588edb2caebe71f8272627a699d, REOPEN/MOVE in 501 msec 2024-11-27T16:21:32,393 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-27T16:21:32,393 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 512 msec 2024-11-27T16:21:32,396 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 940 msec 2024-11-27T16:21:32,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-11-27T16:21:32,406 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a3d7b93 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@109a98f4 2024-11-27T16:21:32,413 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d3cf478, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:21:32,415 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ad21927 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4169e339 2024-11-27T16:21:32,418 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cd0bf5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:21:32,419 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x40e8ce40 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50484683 2024-11-27T16:21:32,423 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77f4d875, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:21:32,424 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2939e0db to 
127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b64ccdf 2024-11-27T16:21:32,427 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e4c3b1f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:21:32,428 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2362c8ba to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42b9a37d 2024-11-27T16:21:32,431 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@de2fcf6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:21:32,432 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0c645fa1 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@16fb1797 2024-11-27T16:21:32,435 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c368568, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:21:32,436 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c317ae0 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5238815e 2024-11-27T16:21:32,439 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1aed43b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:21:32,440 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a0fc918 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c2b9b76 2024-11-27T16:21:32,444 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cab9ba4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:21:32,445 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x77b8b9d2 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37886c78 2024-11-27T16:21:32,448 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74eb796, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:21:32,453 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:32,453 DEBUG [hconnection-0x5ac15ae3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:21:32,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-27T16:21:32,455 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:32,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-27T16:21:32,455 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:32,455 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:32,458 DEBUG [hconnection-0x3e73df77-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:21:32,459 DEBUG [hconnection-0x508bc58f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:21:32,460 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:21:32,460 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39168, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:21:32,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:32,475 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:21:32,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:32,476 DEBUG [hconnection-0x3b6534b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:21:32,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:32,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:32,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:32,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:32,476 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:32,478 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39194, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:21:32,480 DEBUG [hconnection-0x13120d9f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:21:32,482 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39208, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:21:32,485 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39214, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:21:32,497 DEBUG [hconnection-0x5b825a9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:21:32,498 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39228, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:21:32,500 DEBUG [hconnection-0x4561b0a6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:21:32,501 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39242, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:21:32,504 DEBUG [hconnection-0x1c8dd5d7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:21:32,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,505 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39248, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:21:32,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724552503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724552504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724552504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724552505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,507 DEBUG [hconnection-0x477ee78f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:21:32,508 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39264, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:21:32,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724552509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-27T16:21:32,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411278058163c424643f4bee84449b65d5b53_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724492468/Put/seqid=0 2024-11-27T16:21:32,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741962_1138 (size=12154) 2024-11-27T16:21:32,597 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:32,603 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411278058163c424643f4bee84449b65d5b53_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278058163c424643f4bee84449b65d5b53_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:32,604 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/002f374727ed434abad4fdc81c620220, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:32,606 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,606 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-27T16:21:32,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
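[Editor's note] The repeated RegionTooBusyException warnings above show the region server rejecting writes because the region's memstore has reached its blocking limit (reported as 512.0 K) while the flush of stores A, B and C is still in progress. The Java sketch below is illustrative only and is not taken from the test: it issues the same kind of Put against TestAcidGuarantees and treats RegionTooBusyException as a transient condition to back off and retry. The row key, column family, retry count and sleep are invented values; depending on client retry settings, the stock client may also retry this exception internally before an application ever sees it.

// Illustrative sketch only -- not the test's code. Assumes the standard HBase
// client API; the table, row and family names mirror what appears in the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            int attempts = 0;
            while (true) {
                try {
                    // May be rejected while the region's memstore is over its blocking limit.
                    table.put(put);
                    break;
                } catch (RegionTooBusyException busy) {
                    // Transient condition: the region is flushing. Back off and retry a few times.
                    if (++attempts >= 10) {
                        throw busy; // give up after an arbitrary number of attempts
                    }
                    Thread.sleep(100L * attempts);
                }
            }
        }
    }
}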
2024-11-27T16:21:32,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:32,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:32,607 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:32,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
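[Editor's note] The pid=48 failures above are expected contention rather than an error in the data path: the FlushRegionCallable dispatched by the master finds the region "already flushing" (the MemStoreFlusher-driven flush is still writing its store files), reports the IOException back, and the master keeps re-dispatching the remote procedure, as the later retries in this log show. The admin request that started it, "Client=jenkins//172.17.0.2 flush TestAcidGuarantees", corresponds to a table flush call along the lines of the hedged sketch below (standard Admin API assumed; this is not the test's code).

// Illustrative sketch only: issuing the table flush seen in the log
// ("flush TestAcidGuarantees"). Connection details and error handling are simplified.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // On the master this becomes a FlushTableProcedure (pid=47 above), which
            // fans out FlushRegionProcedure subprocedures (pid=48) to the region servers.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}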
2024-11-27T16:21:32,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:32,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724552608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724552608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724552608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/002f374727ed434abad4fdc81c620220 is 175, key is test_row_0/A:col10/1732724492468/Put/seqid=0 2024-11-27T16:21:32,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724552611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724552612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741963_1139 (size=30955) 2024-11-27T16:21:32,638 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/002f374727ed434abad4fdc81c620220 2024-11-27T16:21:32,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/55f562718c5948c092bda235c76c99b1 is 50, key is test_row_0/B:col10/1732724492468/Put/seqid=0 2024-11-27T16:21:32,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741964_1140 (size=12001) 2024-11-27T16:21:32,680 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/55f562718c5948c092bda235c76c99b1 2024-11-27T16:21:32,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/57fa462da93141e1931551e7f00628e8 is 50, key is test_row_0/C:col10/1732724492468/Put/seqid=0 2024-11-27T16:21:32,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741965_1141 (size=12001) 2024-11-27T16:21:32,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-27T16:21:32,763 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-27T16:21:32,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 
{event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:32,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:32,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:32,765 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:32,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
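[Editor's note] The 512.0 K blocking limit quoted in the warnings is far below HBase's default per-region memstore flush size (128 MB), so the test configuration has evidently been shrunk to force frequent flushes and write back-pressure. As an illustration only, the blocking threshold is normally the product of the flush size and the block multiplier; a test-style configuration could produce the 512 KB figure as in the sketch below (the values are assumptions chosen to match the number in the log, not settings read from the test).

// Hedged illustration: the threshold at which writes start failing with
// RegionTooBusyException is roughly flushSize * blockMultiplier.
// The values below are invented for illustration, not taken from the test.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
    public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore size at which a flush is requested (default 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // 128 KB
        // Writes are blocked once the memstore reaches flushSize * multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // 128 KB * 4 = 512 KB
        return conf;
    }
}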
2024-11-27T16:21:32,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:32,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724552818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724552818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724552819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724552824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:32,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724552824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,918 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:32,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-27T16:21:32,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:32,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:32,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:32,919 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:32,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:32,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:33,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-27T16:21:33,072 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-27T16:21:33,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:33,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:33,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:33,073 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:33,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:33,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:33,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724553121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724553122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724553124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724553129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724553131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/57fa462da93141e1931551e7f00628e8 2024-11-27T16:21:33,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/002f374727ed434abad4fdc81c620220 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/002f374727ed434abad4fdc81c620220 2024-11-27T16:21:33,155 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/002f374727ed434abad4fdc81c620220, entries=150, sequenceid=16, filesize=30.2 K 2024-11-27T16:21:33,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/55f562718c5948c092bda235c76c99b1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/55f562718c5948c092bda235c76c99b1 2024-11-27T16:21:33,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/55f562718c5948c092bda235c76c99b1, entries=150, sequenceid=16, filesize=11.7 K 2024-11-27T16:21:33,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/57fa462da93141e1931551e7f00628e8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/57fa462da93141e1931551e7f00628e8 2024-11-27T16:21:33,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/57fa462da93141e1931551e7f00628e8, entries=150, sequenceid=16, filesize=11.7 K 2024-11-27T16:21:33,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 0eeb0588edb2caebe71f8272627a699d in 706ms, sequenceid=16, compaction requested=false 2024-11-27T16:21:33,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:33,226 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-27T16:21:33,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:33,227 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-27T16:21:33,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:33,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:33,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:33,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:33,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:33,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:33,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e07c85fa0a7747aa8d7b533b027c3e40_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724492503/Put/seqid=0 2024-11-27T16:21:33,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741966_1142 (size=12154) 2024-11-27T16:21:33,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:33,277 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e07c85fa0a7747aa8d7b533b027c3e40_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e07c85fa0a7747aa8d7b533b027c3e40_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:33,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/3407a8a656b8441e85075602cd8a4aae, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:33,279 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/3407a8a656b8441e85075602cd8a4aae is 175, key is test_row_0/A:col10/1732724492503/Put/seqid=0 2024-11-27T16:21:33,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741967_1143 (size=30955) 2024-11-27T16:21:33,300 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/3407a8a656b8441e85075602cd8a4aae 2024-11-27T16:21:33,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/c6aad0e13f824890a3e7d2b54cb5ad65 is 50, key is test_row_0/B:col10/1732724492503/Put/seqid=0 2024-11-27T16:21:33,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741968_1144 (size=12001) 2024-11-27T16:21:33,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-27T16:21:33,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:33,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:33,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724553636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724553637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724553637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724553640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724553641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,742 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/c6aad0e13f824890a3e7d2b54cb5ad65 2024-11-27T16:21:33,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724553744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724553744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724553745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/ffdb929f95d34f5bbc3e4310191d39c4 is 50, key is test_row_0/C:col10/1732724492503/Put/seqid=0 2024-11-27T16:21:33,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741969_1145 (size=12001) 2024-11-27T16:21:33,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724553948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724553950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:33,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:33,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724553956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:34,166 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/ffdb929f95d34f5bbc3e4310191d39c4 2024-11-27T16:21:34,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/3407a8a656b8441e85075602cd8a4aae as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/3407a8a656b8441e85075602cd8a4aae 2024-11-27T16:21:34,180 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/3407a8a656b8441e85075602cd8a4aae, entries=150, sequenceid=42, filesize=30.2 K 2024-11-27T16:21:34,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/c6aad0e13f824890a3e7d2b54cb5ad65 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/c6aad0e13f824890a3e7d2b54cb5ad65 2024-11-27T16:21:34,192 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/c6aad0e13f824890a3e7d2b54cb5ad65, entries=150, sequenceid=42, filesize=11.7 K 2024-11-27T16:21:34,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/ffdb929f95d34f5bbc3e4310191d39c4 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/ffdb929f95d34f5bbc3e4310191d39c4 2024-11-27T16:21:34,202 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/ffdb929f95d34f5bbc3e4310191d39c4, entries=150, sequenceid=42, filesize=11.7 K 2024-11-27T16:21:34,203 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 0eeb0588edb2caebe71f8272627a699d in 976ms, sequenceid=42, compaction requested=false 2024-11-27T16:21:34,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:34,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:34,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-27T16:21:34,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-27T16:21:34,206 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-27T16:21:34,206 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7500 sec 2024-11-27T16:21:34,208 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.7540 sec 2024-11-27T16:21:34,233 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-27T16:21:34,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:34,255 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:21:34,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:34,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:34,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:34,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:34,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:34,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:34,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a652f99f908f448c973174fbf436e7bb_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724493637/Put/seqid=0 2024-11-27T16:21:34,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741970_1146 (size=14594) 2024-11-27T16:21:34,320 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,329 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a652f99f908f448c973174fbf436e7bb_0eeb0588edb2caebe71f8272627a699d to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a652f99f908f448c973174fbf436e7bb_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:34,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:34,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724554325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:34,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:34,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724554325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:34,332 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/dd3d70498f42464987b8c669c59f8326, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:34,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:34,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724554327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:34,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/dd3d70498f42464987b8c669c59f8326 is 175, key is test_row_0/A:col10/1732724493637/Put/seqid=0 2024-11-27T16:21:34,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741971_1147 (size=39549) 2024-11-27T16:21:34,352 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/dd3d70498f42464987b8c669c59f8326 2024-11-27T16:21:34,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/daae7746fe5042378d4b9c12dc826fcd is 50, key is test_row_0/B:col10/1732724493637/Put/seqid=0 2024-11-27T16:21:34,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741972_1148 (size=12001) 2024-11-27T16:21:34,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/daae7746fe5042378d4b9c12dc826fcd 2024-11-27T16:21:34,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:34,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724554433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:34,435 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/ae6825c059f048588118b31bdde03149 is 50, key is test_row_0/C:col10/1732724493637/Put/seqid=0 2024-11-27T16:21:34,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:34,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724554433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:34,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:34,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724554436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:34,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741973_1149 (size=12001) 2024-11-27T16:21:34,458 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/ae6825c059f048588118b31bdde03149 2024-11-27T16:21:34,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/dd3d70498f42464987b8c669c59f8326 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/dd3d70498f42464987b8c669c59f8326 2024-11-27T16:21:34,474 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/dd3d70498f42464987b8c669c59f8326, entries=200, sequenceid=53, filesize=38.6 K 2024-11-27T16:21:34,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/daae7746fe5042378d4b9c12dc826fcd as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/daae7746fe5042378d4b9c12dc826fcd 2024-11-27T16:21:34,484 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/daae7746fe5042378d4b9c12dc826fcd, entries=150, sequenceid=53, filesize=11.7 K 2024-11-27T16:21:34,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/ae6825c059f048588118b31bdde03149 as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/ae6825c059f048588118b31bdde03149 2024-11-27T16:21:34,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/ae6825c059f048588118b31bdde03149, entries=150, sequenceid=53, filesize=11.7 K 2024-11-27T16:21:34,504 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 0eeb0588edb2caebe71f8272627a699d in 249ms, sequenceid=53, compaction requested=true 2024-11-27T16:21:34,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:34,504 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:34,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:34,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:34,505 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:34,506 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:34,507 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/A is initiating minor compaction (all files) 2024-11-27T16:21:34,507 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/A in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:34,507 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/002f374727ed434abad4fdc81c620220, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/3407a8a656b8441e85075602cd8a4aae, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/dd3d70498f42464987b8c669c59f8326] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=99.1 K 2024-11-27T16:21:34,507 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:34,507 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/002f374727ed434abad4fdc81c620220, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/3407a8a656b8441e85075602cd8a4aae, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/dd3d70498f42464987b8c669c59f8326] 2024-11-27T16:21:34,507 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:34,508 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/B is initiating minor compaction (all files) 2024-11-27T16:21:34,508 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/B in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:34,508 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/55f562718c5948c092bda235c76c99b1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/c6aad0e13f824890a3e7d2b54cb5ad65, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/daae7746fe5042378d4b9c12dc826fcd] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=35.2 K 2024-11-27T16:21:34,508 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 55f562718c5948c092bda235c76c99b1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732724492468 2024-11-27T16:21:34,508 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 002f374727ed434abad4fdc81c620220, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732724492468 2024-11-27T16:21:34,509 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting c6aad0e13f824890a3e7d2b54cb5ad65, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732724492500 2024-11-27T16:21:34,509 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3407a8a656b8441e85075602cd8a4aae, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732724492500 2024-11-27T16:21:34,509 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting daae7746fe5042378d4b9c12dc826fcd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732724493636 2024-11-27T16:21:34,510 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd3d70498f42464987b8c669c59f8326, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732724493636 2024-11-27T16:21:34,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:34,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:34,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:34,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:34,532 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#B#compaction#129 average throughput is 3.28 MB/second, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:34,532 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/5b9a2f2abdd54afd823d7f8bda8f6a21 is 50, key is test_row_0/B:col10/1732724493637/Put/seqid=0 2024-11-27T16:21:34,541 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:34,549 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127f79959bda28d4dd2af2b3187f41be60b_0eeb0588edb2caebe71f8272627a699d store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:34,556 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127f79959bda28d4dd2af2b3187f41be60b_0eeb0588edb2caebe71f8272627a699d, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:34,556 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127f79959bda28d4dd2af2b3187f41be60b_0eeb0588edb2caebe71f8272627a699d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:34,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-27T16:21:34,562 INFO [Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-27T16:21:34,564 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:34,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-27T16:21:34,567 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:34,568 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:34,568 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:34,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=49 2024-11-27T16:21:34,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741974_1150 (size=12104) 2024-11-27T16:21:34,588 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/5b9a2f2abdd54afd823d7f8bda8f6a21 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/5b9a2f2abdd54afd823d7f8bda8f6a21 2024-11-27T16:21:34,599 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/B of 0eeb0588edb2caebe71f8272627a699d into 5b9a2f2abdd54afd823d7f8bda8f6a21(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:34,599 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:34,599 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/B, priority=13, startTime=1732724494505; duration=0sec 2024-11-27T16:21:34,599 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:34,599 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:B 2024-11-27T16:21:34,599 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:34,602 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:34,602 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/C is initiating minor compaction (all files) 2024-11-27T16:21:34,602 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/C in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:34,602 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/57fa462da93141e1931551e7f00628e8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/ffdb929f95d34f5bbc3e4310191d39c4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/ae6825c059f048588118b31bdde03149] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=35.2 K 2024-11-27T16:21:34,603 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 57fa462da93141e1931551e7f00628e8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732724492468 2024-11-27T16:21:34,604 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting ffdb929f95d34f5bbc3e4310191d39c4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732724492500 2024-11-27T16:21:34,604 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting ae6825c059f048588118b31bdde03149, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732724493636 2024-11-27T16:21:34,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741975_1151 (size=4469) 2024-11-27T16:21:34,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:34,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-27T16:21:34,644 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#C#compaction#131 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:34,645 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/0783c9d1842a40d097e999bb82466084 is 50, key is test_row_0/C:col10/1732724493637/Put/seqid=0 2024-11-27T16:21:34,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:34,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:34,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:34,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:34,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:34,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:34,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:34,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724554654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:34,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:34,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724554655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:34,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:34,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724554655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:34,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:34,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724554656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:34,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:34,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724554656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:34,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-27T16:21:34,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741976_1152 (size=12104) 2024-11-27T16:21:34,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411273ce063eebbe74b41bfc80a429b921f49_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724494641/Put/seqid=0 2024-11-27T16:21:34,720 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:34,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-27T16:21:34,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:34,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:34,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:34,721 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:34,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:34,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:34,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741977_1153 (size=14594) 2024-11-27T16:21:34,736 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,743 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411273ce063eebbe74b41bfc80a429b921f49_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273ce063eebbe74b41bfc80a429b921f49_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:34,745 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/7c3574c93c434940b7b3cca27a2bfc82, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:34,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/7c3574c93c434940b7b3cca27a2bfc82 is 175, key is test_row_0/A:col10/1732724494641/Put/seqid=0 2024-11-27T16:21:34,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741978_1154 (size=39549) 2024-11-27T16:21:34,759 INFO [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=82, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/7c3574c93c434940b7b3cca27a2bfc82 2024-11-27T16:21:34,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:34,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724554762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:34,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:21:34,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724554765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:21:34,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:21:34,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724554763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:21:34,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:21:34,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724554765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:21:34,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:21:34,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/01ae8ba93d15406486239b0220cd75a4 is 50, key is test_row_0/B:col10/1732724494641/Put/seqid=0
2024-11-27T16:21:34,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724554765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:21:34,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741979_1155 (size=12001)
2024-11-27T16:21:34,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/01ae8ba93d15406486239b0220cd75a4
2024-11-27T16:21:34,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/7facee1695d94554bb83f94a730d0a24 is 50, key is test_row_0/C:col10/1732724494641/Put/seqid=0
2024-11-27T16:21:34,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741980_1156 (size=12001)
2024-11-27T16:21:34,804 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/7facee1695d94554bb83f94a730d0a24
2024-11-27T16:21:34,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/7c3574c93c434940b7b3cca27a2bfc82 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/7c3574c93c434940b7b3cca27a2bfc82
2024-11-27T16:21:34,815 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/7c3574c93c434940b7b3cca27a2bfc82, entries=200, sequenceid=82, filesize=38.6 K
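The RegionTooBusyException entries above are the region server applying write backpressure while MemStoreFlusher.0 drains the region: HRegion.checkResources rejects incoming Mutate calls once the region's memstore passes its blocking threshold (normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; this test tunes it down so the limit sits at 512.0 K). The stock client usually retries these internally, but a writer that does see the exception surface is expected to back off and try again. A minimal sketch of that pattern, assuming the exception reaches the caller unwrapped; the table name, column family, and backoff values are illustrative, not taken from the test:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                putWithBackoff(table, put, 5, 100L);
            }
        }

        // Retry the put when the region reports it is too busy, doubling the pause each
        // time so the flush seen in the log can catch up before the next attempt.
        static void putWithBackoff(Table table, Put put, int maxAttempts, long initialPauseMs)
                throws IOException, InterruptedException {
            long pauseMs = initialPauseMs;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    if (attempt >= maxAttempts) {
                        throw e; // give up after the configured number of attempts
                    }
                    Thread.sleep(pauseMs);
                    pauseMs *= 2;
                }
            }
        }
    }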
2024-11-27T16:21:34,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/01ae8ba93d15406486239b0220cd75a4 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/01ae8ba93d15406486239b0220cd75a4 2024-11-27T16:21:34,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,821 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/01ae8ba93d15406486239b0220cd75a4, entries=150, sequenceid=82, filesize=11.7 K 2024-11-27T16:21:34,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/7facee1695d94554bb83f94a730d0a24 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/7facee1695d94554bb83f94a730d0a24 2024-11-27T16:21:34,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,829 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/7facee1695d94554bb83f94a730d0a24, entries=150, sequenceid=82, filesize=11.7 K 2024-11-27T16:21:34,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,830 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 0eeb0588edb2caebe71f8272627a699d in 187ms, sequenceid=82, compaction requested=false 2024-11-27T16:21:34,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:34,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-27T16:21:34,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:34,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:34,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:34,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:34,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:34,876 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967
2024-11-27T16:21:34,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:34,876 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50
2024-11-27T16:21:34,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:34,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.
2024-11-27T16:21:34,877 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB
2024-11-27T16:21:34,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:34,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A
2024-11-27T16:21:34,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:34,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B
2024-11-27T16:21:34,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:34,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:34,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C
2024-11-27T16:21:34,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:34,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:34,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:34,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:34,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:34,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:34,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
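pid=50 above is a flush procedure being dispatched to the region server: RSRpcServices executes FlushRegionCallable, and the region then snapshots all three column families (A, B, C) for flushing. The log does not show which caller requested this flush; a client can ask for the same kind of region-server-side flush through the Admin API. A minimal sketch under that assumption, with the table name taken from the log but otherwise illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask for a flush of every region of the table; the region servers carry
                // it out and log the per-store "FLUSHING TO DISK" steps seen above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }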
2024-11-27T16:21:34,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
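The recurring "instantiating StoreFileTracker impl ... DefaultStoreFileTracker" DEBUG lines come from StoreFileTrackerFactory resolving a tracker for a store each time store-file metadata is needed; DefaultStoreFileTracker is the stock implementation that tracks files directly in the column family directory. The lines are DEBUG-level, so raising the log level for org.apache.hadoop.hbase.regionserver.storefiletracker hides them without changing behaviour. The tracker itself is selected by configuration; a minimal sketch, assuming the usual property name (hbase.store.file-tracker.impl, the value behind StoreFileTrackerFactory.TRACKER_IMPL on releases that ship store file tracking), which should be verified against the version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileTrackerConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Cluster-wide default tracker. "DEFAULT" maps to DefaultStoreFileTracker,
            // the implementation the log above shows being instantiated for every store access.
            // The property name is an assumption to check against the running release; it can
            // also be overridden per table or per column family in the table descriptor.
            conf.set("hbase.store.file-tracker.impl", "DEFAULT");
            System.out.println("store file tracker = " + conf.get("hbase.store.file-tracker.impl"));
        }
    }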
2024-11-27T16:21:34,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a90cee46f3b544b6bd184d641c17c2bd_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724494655/Put/seqid=0 2024-11-27T16:21:34,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,912 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,919 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,926 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741981_1157 (size=9714) 2024-11-27T16:21:34,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,939 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 
{event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a90cee46f3b544b6bd184d641c17c2bd_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a90cee46f3b544b6bd184d641c17c2bd_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:34,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/358b3a5864a64b51b2b2faf90d1e6aa7, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:34,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/358b3a5864a64b51b2b2faf90d1e6aa7 is 175, key is test_row_0/A:col10/1732724494655/Put/seqid=0 2024-11-27T16:21:34,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741982_1158 (size=22361) 2024-11-27T16:21:34,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,974 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=91, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/358b3a5864a64b51b2b2faf90d1e6aa7 2024-11-27T16:21:34,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
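The HMobStore and DefaultMobStoreFlusher entries above show cells from column family A being flushed through the MOB (medium object) path and renamed from mobdir/.tmp into mobdir/data. Whether a family takes that path is decided per column family when the table is defined. The following is a minimal sketch, not taken from this test setup, assuming an existing Connection named conn; the table and family names come from the log, while the 100-byte MOB threshold is an illustrative assumption.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  // Hypothetical helper: create a table whose family "A" stores large cells as MOB files,
  // so that flushes of that family go through DefaultMobStoreFlusher as seen in the log.
  static void createMobTable(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      table.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)    // route values above the threshold to the MOB store
              .setMobThreshold(100L)  // illustrative threshold in bytes (assumption, not from this run)
              .build());
      admin.createTable(table.build());
    }
  }
}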
2024-11-27T16:21:34,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
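The repeated StoreFileTrackerFactory(122) DEBUG entries record the factory resolving the tracker implementation, here the default one, each time store files are consulted by the RPC handlers. In recent HBase releases the implementation can also be chosen per table via the hbase.store.file-tracker.impl property; the sketch below sets it at table-creation time (switching an existing table goes through a migration tracker instead). The property key, the FILE value, and the table name are assumptions to verify against the HBase version in use.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerExample {
  // Hypothetical helper: create a table that uses a file-based store file tracker
  // rather than the DefaultStoreFileTracker resolved in the log above.
  static void createWithFileTracker(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("ExampleTable")) // hypothetical table
              .setValue("hbase.store.file-tracker.impl", "FILE") // assumption: key/value per the store-file-tracking feature
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
              .build());
    }
  }
}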
2024-11-27T16:21:34,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:34,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/a60af2ae54684ba186a550755638f4d4 is 50, key is test_row_0/B:col10/1732724494655/Put/seqid=0 2024-11-27T16:21:34,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:34,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:35,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741983_1159 (size=9657) 2024-11-27T16:21:35,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,021 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/a60af2ae54684ba186a550755638f4d4 2024-11-27T16:21:35,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,032 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#A#compaction#130 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:35,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,035 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/4507d6363c254a5f93ed29182618c1b4 is 175, key is test_row_0/A:col10/1732724493637/Put/seqid=0 2024-11-27T16:21:35,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/faca2b94d10f40ccb5602dffe3721f3d is 50, key is test_row_0/C:col10/1732724494655/Put/seqid=0 2024-11-27T16:21:35,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724555042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724555049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724555050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724555050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724555051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741985_1161 (size=9657) 2024-11-27T16:21:35,073 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/faca2b94d10f40ccb5602dffe3721f3d 2024-11-27T16:21:35,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/358b3a5864a64b51b2b2faf90d1e6aa7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/358b3a5864a64b51b2b2faf90d1e6aa7 2024-11-27T16:21:35,091 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/358b3a5864a64b51b2b2faf90d1e6aa7, entries=100, sequenceid=91, filesize=21.8 K 2024-11-27T16:21:35,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/a60af2ae54684ba186a550755638f4d4 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a60af2ae54684ba186a550755638f4d4 2024-11-27T16:21:35,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741984_1160 (size=31058) 2024-11-27T16:21:35,095 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/0783c9d1842a40d097e999bb82466084 as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/0783c9d1842a40d097e999bb82466084 2024-11-27T16:21:35,133 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/C of 0eeb0588edb2caebe71f8272627a699d into 0783c9d1842a40d097e999bb82466084(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:35,133 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:35,133 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/C, priority=13, startTime=1732724494513; duration=0sec 2024-11-27T16:21:35,133 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:35,134 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:C 2024-11-27T16:21:35,135 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a60af2ae54684ba186a550755638f4d4, entries=100, sequenceid=91, filesize=9.4 K 2024-11-27T16:21:35,137 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/4507d6363c254a5f93ed29182618c1b4 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/4507d6363c254a5f93ed29182618c1b4 2024-11-27T16:21:35,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/faca2b94d10f40ccb5602dffe3721f3d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/faca2b94d10f40ccb5602dffe3721f3d 2024-11-27T16:21:35,150 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/A of 0eeb0588edb2caebe71f8272627a699d into 4507d6363c254a5f93ed29182618c1b4(size=30.3 K), total size for store is 90.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
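The RegionTooBusyException warnings a few entries above show puts being rejected while region 0eeb0588edb2caebe71f8272627a699d is over its memstore blocking limit (512.0 K in this test). The standard HBase client already treats this as a retryable condition and may surface it wrapped in its own retries-exhausted exception; the sketch below only illustrates the shape of an explicit application-level backoff. The table name comes from the log, while the retry count and sleep interval are assumptions.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BusyRegionRetryExample {
  // Hypothetical helper: retry a put a few times when the region reports it is too busy.
  static void putWithBackoff(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      int attempts = 0;
      while (true) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          // Note: the client may also deliver this condition wrapped in its own retry exception.
          if (++attempts >= 5) {            // assumption: give up after 5 attempts
            throw e;
          }
          Thread.sleep(100L * attempts);    // assumption: simple linear backoff in milliseconds
        }
      }
    }
  }
}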
2024-11-27T16:21:35,150 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:35,150 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/A, priority=13, startTime=1732724494504; duration=0sec 2024-11-27T16:21:35,150 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:35,150 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:A 2024-11-27T16:21:35,154 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/faca2b94d10f40ccb5602dffe3721f3d, entries=100, sequenceid=91, filesize=9.4 K 2024-11-27T16:21:35,157 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 0eeb0588edb2caebe71f8272627a699d in 279ms, sequenceid=91, compaction requested=true 2024-11-27T16:21:35,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:35,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:35,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-27T16:21:35,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-27T16:21:35,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:35,162 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-27T16:21:35,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:35,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:35,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:35,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:35,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:35,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:35,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-27T16:21:35,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 592 msec 2024-11-27T16:21:35,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 603 msec 2024-11-27T16:21:35,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-27T16:21:35,172 INFO [Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-27T16:21:35,174 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-27T16:21:35,176 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:35,177 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:35,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,177 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:35,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724555164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-27T16:21:35,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724555166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724555174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724555188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724555175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112780b52c9e2f824935b1c88aed6157a838_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724495159/Put/seqid=0 2024-11-27T16:21:35,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741986_1162 (size=12154) 2024-11-27T16:21:35,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-27T16:21:35,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724555278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724555279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724555289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724555291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724555296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,329 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,330 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T16:21:35,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:35,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:35,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:35,330 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:35,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:35,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:35,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-27T16:21:35,484 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724555485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724555485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T16:21:35,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:35,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:35,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:35,501 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:35,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:35,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:35,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724555500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724555500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724555501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,652 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:35,653 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,654 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T16:21:35,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:35,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:35,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:35,655 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:35,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:35,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:35,660 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112780b52c9e2f824935b1c88aed6157a838_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112780b52c9e2f824935b1c88aed6157a838_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:35,661 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/f42a0578bdb94064af7aef481f16fec2, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:35,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/f42a0578bdb94064af7aef481f16fec2 is 175, key is test_row_0/A:col10/1732724495159/Put/seqid=0 2024-11-27T16:21:35,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741987_1163 (size=30955) 2024-11-27T16:21:35,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-27T16:21:35,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724555788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724555789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724555806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,807 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,807 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T16:21:35,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:35,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:35,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:35,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:35,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:35,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:35,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724555806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:35,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724555807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,960 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:35,961 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T16:21:35,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:35,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:35,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:35,961 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:35,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:35,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:36,082 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=123, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/f42a0578bdb94064af7aef481f16fec2 2024-11-27T16:21:36,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/a769f4e1bde642148f5529cc773eeb73 is 50, key is test_row_0/B:col10/1732724495159/Put/seqid=0 2024-11-27T16:21:36,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741988_1164 (size=12001) 2024-11-27T16:21:36,114 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:36,115 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T16:21:36,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:36,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:36,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:36,115 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:36,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:36,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:36,268 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:36,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T16:21:36,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:36,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:36,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:36,272 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:36,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:36,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:36,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-27T16:21:36,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:36,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724556292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:36,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:36,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724556294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:36,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:36,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724556310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:36,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:36,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724556315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:36,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:36,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724556315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:36,424 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:36,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T16:21:36,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:36,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:36,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:36,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:36,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:36,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:36,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/a769f4e1bde642148f5529cc773eeb73 2024-11-27T16:21:36,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/31c8f9ae1b2741c686a1d565938751b1 is 50, key is test_row_0/C:col10/1732724495159/Put/seqid=0 2024-11-27T16:21:36,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741989_1165 (size=12001) 2024-11-27T16:21:36,578 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:36,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T16:21:36,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:36,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:36,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:36,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:36,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:36,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:36,733 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:36,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T16:21:36,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:36,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:36,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:36,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:36,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:36,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:36,887 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:36,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T16:21:36,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:36,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:36,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:36,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:36,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
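The repeated failure above is a guard, not data loss: the region refuses to start a second flush while one is still in flight ("NOT flushing ... as already flushing"), and the callable surfaces that refusal as an IOException so the master knows the attempt did not run. A simplified, self-contained illustration of that guard pattern follows; it is hypothetical code written for this note, not the HBase source, and all names in it are invented.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical sketch of a "reject if already flushing" guard.
public class SingleFlushGuard {
  private final AtomicBoolean flushing = new AtomicBoolean(false);

  public void flushOnce(Runnable writeSnapshotToDisk) throws IOException {
    if (!flushing.compareAndSet(false, true)) {
      // Mirrors "NOT flushing ... as already flushing" followed by
      // "java.io.IOException: Unable to complete flush" in the log above.
      throw new IOException("Unable to complete flush: already flushing");
    }
    try {
      writeSnapshotToDisk.run();   // the real work runs at most once at a time
    } finally {
      flushing.set(false);
    }
  }
}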
2024-11-27T16:21:36,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
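On the master side the failure arrives as a RemoteProcedureException, is logged as "Remote procedure failed, pid=52", and the same pid is simply dispatched again a moment later (16:21:36,734 → 16:21:36,888 → 16:21:37,042 in the timestamps, roughly every 150 ms) until the in-flight flush finishes and a retried attempt can proceed. A rough sketch of that behaviour is below, assuming a fixed retry delay; the real RSProcedureDispatcher has its own scheduling and backoff policy, and the class and method names here are invented for the illustration.

import java.io.IOException;
import java.util.concurrent.Callable;

// Hypothetical retry loop approximating the redispatch cadence seen in the log.
public final class RetryUntilAccepted {
  public static <T> T run(Callable<T> remoteCall, long delayMillis) throws Exception {
    while (true) {
      try {
        return remoteCall.call();      // e.g. "execute FlushRegionCallable, pid=52"
      } catch (IOException rejected) {
        Thread.sleep(delayMillis);     // ~150 ms between attempts in the timestamps above
      }
    }
  }
}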
2024-11-27T16:21:36,953 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/31c8f9ae1b2741c686a1d565938751b1 2024-11-27T16:21:36,958 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/f42a0578bdb94064af7aef481f16fec2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/f42a0578bdb94064af7aef481f16fec2 2024-11-27T16:21:36,964 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/f42a0578bdb94064af7aef481f16fec2, entries=150, sequenceid=123, filesize=30.2 K 2024-11-27T16:21:36,964 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-27T16:21:36,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/a769f4e1bde642148f5529cc773eeb73 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a769f4e1bde642148f5529cc773eeb73 2024-11-27T16:21:36,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a769f4e1bde642148f5529cc773eeb73, entries=150, sequenceid=123, filesize=11.7 K 2024-11-27T16:21:36,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/31c8f9ae1b2741c686a1d565938751b1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/31c8f9ae1b2741c686a1d565938751b1 2024-11-27T16:21:36,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,977 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/31c8f9ae1b2741c686a1d565938751b1, entries=150, sequenceid=123, filesize=11.7 K 2024-11-27T16:21:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize 
~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 0eeb0588edb2caebe71f8272627a699d in 1817ms, sequenceid=123, compaction requested=true 2024-11-27T16:21:36,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:36,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:36,979 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:36,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:36,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:36,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:36,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:36,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:21:36,979 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:36,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,981 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,981 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 123923 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:36,982 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/A is initiating minor compaction (all files) 2024-11-27T16:21:36,982 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/A in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:36,982 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/4507d6363c254a5f93ed29182618c1b4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/7c3574c93c434940b7b3cca27a2bfc82, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/358b3a5864a64b51b2b2faf90d1e6aa7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/f42a0578bdb94064af7aef481f16fec2] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=121.0 K 2024-11-27T16:21:36,982 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:36,982 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/4507d6363c254a5f93ed29182618c1b4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/7c3574c93c434940b7b3cca27a2bfc82, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/358b3a5864a64b51b2b2faf90d1e6aa7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/f42a0578bdb94064af7aef481f16fec2] 2024-11-27T16:21:36,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,983 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45763 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:36,983 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/B is initiating minor compaction (all files) 2024-11-27T16:21:36,983 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/B in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
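The selection just logged (4 store files, 4 eligible, "16 blocking", minor compaction of all files, 121.0 K for A and 44.7 K for B) is driven by a handful of per-store compaction settings. A minimal sketch of reading those knobs from the active configuration is shown below; the fallback values are the commonly documented defaults, so verify them against the HBase version actually in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ShowCompactionKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum / maximum number of files considered for one minor compaction.
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
    int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
    // Size ratio used by the exploring policy when judging candidate files.
    float ratio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Store file count at which writes are blocked ("16 blocking" in the log).
    int blocking = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.printf("min=%d max=%d ratio=%.1f blockingStoreFiles=%d%n",
        minFiles, maxFiles, ratio, blocking);
  }
}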
2024-11-27T16:21:36,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,983 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/5b9a2f2abdd54afd823d7f8bda8f6a21, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/01ae8ba93d15406486239b0220cd75a4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a60af2ae54684ba186a550755638f4d4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a769f4e1bde642148f5529cc773eeb73] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=44.7 K 2024-11-27T16:21:36,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,983 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4507d6363c254a5f93ed29182618c1b4, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732724493636 2024-11-27T16:21:36,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,984 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b9a2f2abdd54afd823d7f8bda8f6a21, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732724493636 2024-11-27T16:21:36,984 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c3574c93c434940b7b3cca27a2bfc82, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732724494322 2024-11-27T16:21:36,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,984 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 01ae8ba93d15406486239b0220cd75a4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732724494641 2024-11-27T16:21:36,985 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 358b3a5864a64b51b2b2faf90d1e6aa7, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732724494654 2024-11-27T16:21:36,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,985 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a60af2ae54684ba186a550755638f4d4, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732724494654 2024-11-27T16:21:36,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,985 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f42a0578bdb94064af7aef481f16fec2, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732724495049 2024-11-27T16:21:36,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,986 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a769f4e1bde642148f5529cc773eeb73, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732724495049 2024-11-27T16:21:36,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:36,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,001 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:37,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,005 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#B#compaction#142 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:37,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,005 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127007f21d74cf44753b3569f39cbe33bb6_0eeb0588edb2caebe71f8272627a699d store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:37,005 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/a4e282ab441f485daffbc548800448dc is 50, key is test_row_0/B:col10/1732724495159/Put/seqid=0 2024-11-27T16:21:37,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,008 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127007f21d74cf44753b3569f39cbe33bb6_0eeb0588edb2caebe71f8272627a699d, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:37,008 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127007f21d74cf44753b3569f39cbe33bb6_0eeb0588edb2caebe71f8272627a699d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:37,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741990_1166 (size=12241) 2024-11-27T16:21:37,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741991_1167 (size=4469) 2024-11-27T16:21:37,041 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,042 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T16:21:37,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
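While the retried flush procedure finally gets to run (next entry), the compactions queued for stores A, B and C are already executing under the 50.00 MB/second throughput limit reported by the throughput controller. The Admin API exposes enough to trigger and watch that from a client; the sketch below assumes an HBase 2.x client where getCompactionState returns org.apache.hadoop.hbase.client.CompactionState, and the class name is made up for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WatchCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.compact(table);   // queue a minor compaction, much as the flusher did above
      // Poll until the servers report no compaction running for the table.
      CompactionState state = admin.getCompactionState(table);
      while (state != CompactionState.NONE) {
        Thread.sleep(1000);
        state = admin.getCompactionState(table);
      }
      System.out.println("compaction finished for " + table);
    }
  }
}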
2024-11-27T16:21:37,043 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB
2024-11-27T16:21:37,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A
2024-11-27T16:21:37,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:37,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B
2024-11-27T16:21:37,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:37,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:37,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C
2024-11-27T16:21:37,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:37,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:37,044 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#A#compaction#141 average throughput is 0.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T16:21:37,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:37,045 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/84dedb33735242dd92d918c6d26bd5fc is 175, key is test_row_0/A:col10/1732724495159/Put/seqid=0
[... repeated DEBUG entries, 16:21:37,046-16:21:37,051: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (RpcServer.default.FPBQ.Fifo handlers, port 44169) ...]
[... repeated DEBUG entries, 16:21:37,051-16:21:37,080: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (RpcServer.default.FPBQ.Fifo handlers, port 44169) ...]
[... repeated DEBUG entries, 16:21:37,080-16:21:37,082: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (RpcServer.default.FPBQ.Fifo handlers, port 44169) ...]
2024-11-27T16:21:37,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112774a504e677184d61ab5651d265c65e39_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724495166/Put/seqid=0
[... repeated DEBUG entries, 16:21:37,082-16:21:37,087: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (RpcServer.default.FPBQ.Fifo handlers, port 44169) ...]
2024-11-27T16:21:37,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741992_1168 (size=31195)
[... repeated DEBUG entries, 16:21:37,088-16:21:37,090: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (RpcServer.default.FPBQ.Fifo handlers, port 44169) ...]
[... repeated DEBUG entries, 16:21:37,090-16:21:37,125: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (RpcServer.default.FPBQ.Fifo handlers, port 44169) ...]
[... repeated DEBUG entries, 16:21:37,125-16:21:37,126: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (RpcServer.default.FPBQ.Fifo handlers, port 44169) ...]
2024-11-27T16:21:37,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741993_1169 (size=9714)
[... repeated DEBUG entries, 16:21:37,127-16:21:37,128: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (RpcServer.default.FPBQ.Fifo handlers, port 44169) ...]
2024-11-27T16:21:37,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... repeated DEBUG entries, 16:21:37,128-16:21:37,133: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (RpcServer.default.FPBQ.Fifo handlers, port 44169) ...]
2024-11-27T16:21:37,134 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112774a504e677184d61ab5651d265c65e39_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112774a504e677184d61ab5651d265c65e39_0eeb0588edb2caebe71f8272627a699d
2024-11-27T16:21:37,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/9f3d3d6ed6554879b8f68e8e29589b69, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d]
2024-11-27T16:21:37,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/9f3d3d6ed6554879b8f68e8e29589b69 is 175, key is test_row_0/A:col10/1732724495166/Put/seqid=0
2024-11-27T16:21:37,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741994_1170 (size=22361)
2024-11-27T16:21:37,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T16:21:37,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T16:21:37,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T16:21:37,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T16:21:37,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T16:21:37,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T16:21:37,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T16:21:37,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-27T16:21:37,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:37,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:37,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:37,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724557365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724557367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724557370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724557371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724557373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,446 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/a4e282ab441f485daffbc548800448dc as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a4e282ab441f485daffbc548800448dc 2024-11-27T16:21:37,461 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/B of 0eeb0588edb2caebe71f8272627a699d into a4e282ab441f485daffbc548800448dc(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:37,461 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:37,461 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/B, priority=12, startTime=1732724496979; duration=0sec 2024-11-27T16:21:37,462 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:37,462 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:B 2024-11-27T16:21:37,462 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:37,464 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45763 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:37,464 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/C is initiating minor compaction (all files) 2024-11-27T16:21:37,464 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/C in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:37,464 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/0783c9d1842a40d097e999bb82466084, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/7facee1695d94554bb83f94a730d0a24, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/faca2b94d10f40ccb5602dffe3721f3d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/31c8f9ae1b2741c686a1d565938751b1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=44.7 K 2024-11-27T16:21:37,465 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0783c9d1842a40d097e999bb82466084, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732724493636 2024-11-27T16:21:37,465 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7facee1695d94554bb83f94a730d0a24, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732724494641 2024-11-27T16:21:37,466 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting faca2b94d10f40ccb5602dffe3721f3d, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, 
compression=NONE, seqNum=91, earliestPutTs=1732724494654 2024-11-27T16:21:37,466 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 31c8f9ae1b2741c686a1d565938751b1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732724495049 2024-11-27T16:21:37,478 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#C#compaction#144 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:37,478 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/c15705e064e546e08cccba7c8051a29c is 50, key is test_row_0/C:col10/1732724495159/Put/seqid=0 2024-11-27T16:21:37,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724557478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724557480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724557481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724557481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724557481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,496 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/84dedb33735242dd92d918c6d26bd5fc as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/84dedb33735242dd92d918c6d26bd5fc 2024-11-27T16:21:37,503 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/A of 0eeb0588edb2caebe71f8272627a699d into 84dedb33735242dd92d918c6d26bd5fc(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:37,503 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:37,503 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/A, priority=12, startTime=1732724496979; duration=0sec 2024-11-27T16:21:37,503 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:37,503 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:A 2024-11-27T16:21:37,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741995_1171 (size=12241) 2024-11-27T16:21:37,571 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=129, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/9f3d3d6ed6554879b8f68e8e29589b69 2024-11-27T16:21:37,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/1e84bcbb96d2497996a80ee638baa3f2 is 50, key is test_row_0/B:col10/1732724495166/Put/seqid=0 2024-11-27T16:21:37,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741996_1172 (size=9657) 2024-11-27T16:21:37,621 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/1e84bcbb96d2497996a80ee638baa3f2 2024-11-27T16:21:37,629 INFO [master/7b191dec6496:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-27T16:21:37,629 INFO [master/7b191dec6496:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-27T16:21:37,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/41d255ff308440f1b753295c647127dc is 50, key is test_row_0/C:col10/1732724495166/Put/seqid=0 2024-11-27T16:21:37,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741997_1173 (size=9657) 2024-11-27T16:21:37,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724557681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724557685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724557685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724557686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724557687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,922 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/c15705e064e546e08cccba7c8051a29c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/c15705e064e546e08cccba7c8051a29c 2024-11-27T16:21:37,931 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/C of 0eeb0588edb2caebe71f8272627a699d into c15705e064e546e08cccba7c8051a29c(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:37,931 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:37,931 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/C, priority=12, startTime=1732724496979; duration=0sec 2024-11-27T16:21:37,931 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:37,931 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:C 2024-11-27T16:21:37,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724557985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724557989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724557991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724557992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:37,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:37,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724557994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:38,073 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/41d255ff308440f1b753295c647127dc 2024-11-27T16:21:38,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/9f3d3d6ed6554879b8f68e8e29589b69 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/9f3d3d6ed6554879b8f68e8e29589b69 2024-11-27T16:21:38,084 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/9f3d3d6ed6554879b8f68e8e29589b69, entries=100, sequenceid=129, filesize=21.8 K 2024-11-27T16:21:38,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/1e84bcbb96d2497996a80ee638baa3f2 as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/1e84bcbb96d2497996a80ee638baa3f2 2024-11-27T16:21:38,090 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/1e84bcbb96d2497996a80ee638baa3f2, entries=100, sequenceid=129, filesize=9.4 K 2024-11-27T16:21:38,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/41d255ff308440f1b753295c647127dc as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/41d255ff308440f1b753295c647127dc 2024-11-27T16:21:38,098 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/41d255ff308440f1b753295c647127dc, entries=100, sequenceid=129, filesize=9.4 K 2024-11-27T16:21:38,101 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for 0eeb0588edb2caebe71f8272627a699d in 1058ms, sequenceid=129, compaction requested=false 2024-11-27T16:21:38,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:38,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:38,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-27T16:21:38,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-27T16:21:38,104 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-27T16:21:38,104 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9250 sec 2024-11-27T16:21:38,106 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 2.9310 sec 2024-11-27T16:21:38,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:38,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-11-27T16:21:38,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:38,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:38,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:38,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:38,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:38,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:38,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:38,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724558495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:38,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:38,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724558496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:38,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:38,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:38,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724558497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:38,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724558496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:38,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:38,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724558499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:38,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275c73d8a97b7d4c6ca2e70233c29dab57_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724497370/Put/seqid=0 2024-11-27T16:21:38,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741998_1174 (size=14794) 2024-11-27T16:21:38,537 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:38,542 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275c73d8a97b7d4c6ca2e70233c29dab57_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275c73d8a97b7d4c6ca2e70233c29dab57_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:38,544 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/7c75de37a69944dd9ce66c448466f510, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:38,544 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/7c75de37a69944dd9ce66c448466f510 is 175, key is test_row_0/A:col10/1732724497370/Put/seqid=0 2024-11-27T16:21:38,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741999_1175 (size=39749) 2024-11-27T16:21:38,552 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=163, memsize=62.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/7c75de37a69944dd9ce66c448466f510 2024-11-27T16:21:38,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/50776388d75b4536a2cffefa7246c55a is 50, key is test_row_0/B:col10/1732724497370/Put/seqid=0 2024-11-27T16:21:38,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742000_1176 (size=12151) 2024-11-27T16:21:38,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/50776388d75b4536a2cffefa7246c55a 2024-11-27T16:21:38,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/432dd1347b4c46a1b11de9e597ea27c1 is 50, key is test_row_0/C:col10/1732724497370/Put/seqid=0 2024-11-27T16:21:38,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:38,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724558598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:38,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742001_1177 (size=12151) 2024-11-27T16:21:38,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:38,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724558600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:38,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:38,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724558800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:38,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:38,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724558803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,001 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/432dd1347b4c46a1b11de9e597ea27c1 2024-11-27T16:21:39,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/7c75de37a69944dd9ce66c448466f510 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/7c75de37a69944dd9ce66c448466f510 2024-11-27T16:21:39,012 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/7c75de37a69944dd9ce66c448466f510, entries=200, sequenceid=163, filesize=38.8 K 2024-11-27T16:21:39,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/50776388d75b4536a2cffefa7246c55a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/50776388d75b4536a2cffefa7246c55a 2024-11-27T16:21:39,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/50776388d75b4536a2cffefa7246c55a, entries=150, sequenceid=163, filesize=11.9 K 2024-11-27T16:21:39,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/432dd1347b4c46a1b11de9e597ea27c1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/432dd1347b4c46a1b11de9e597ea27c1 2024-11-27T16:21:39,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,021 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,024 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/432dd1347b4c46a1b11de9e597ea27c1, entries=150, sequenceid=163, filesize=11.9 K 2024-11-27T16:21:39,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for 0eeb0588edb2caebe71f8272627a699d in 534ms, sequenceid=163, compaction requested=true 2024-11-27T16:21:39,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:39,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:39,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction 
requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:39,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:39,027 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:39,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:39,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:39,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:21:39,027 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:39,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,029 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:39,029 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm 
has selected 3 files of size 93305 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:39,029 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/A is initiating minor compaction (all files) 2024-11-27T16:21:39,029 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/B is initiating minor compaction (all files) 2024-11-27T16:21:39,029 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/A in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:39,029 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/B in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:39,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,029 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/84dedb33735242dd92d918c6d26bd5fc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/9f3d3d6ed6554879b8f68e8e29589b69, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/7c75de37a69944dd9ce66c448466f510] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=91.1 K 2024-11-27T16:21:39,029 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a4e282ab441f485daffbc548800448dc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/1e84bcbb96d2497996a80ee638baa3f2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/50776388d75b4536a2cffefa7246c55a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=33.3 K 2024-11-27T16:21:39,029 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
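At this point the MemStoreFlusher has queued compactions for stores A, B and C, and ExploringCompactionPolicy has selected three store files per store for a minor compaction; family A additionally runs through DefaultMobStoreCompactor because it is MOB-enabled. The sketch below shows how a compaction of a single column family could be requested explicitly through the Admin API. It is an illustrative example only (the class name CompactExample is made up for this sketch); the region server's compaction policy still decides which files are actually merged.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Queue a compaction for column family A of the test table; file
                // selection remains up to the server-side compaction policy.
                admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
                // A major compaction of the same family could be forced instead:
                // admin.majorCompact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
            }
        }
    }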
2024-11-27T16:21:39,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,029 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/84dedb33735242dd92d918c6d26bd5fc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/9f3d3d6ed6554879b8f68e8e29589b69, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/7c75de37a69944dd9ce66c448466f510] 2024-11-27T16:21:39,030 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a4e282ab441f485daffbc548800448dc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732724495049 2024-11-27T16:21:39,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,030 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84dedb33735242dd92d918c6d26bd5fc, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732724495049 2024-11-27T16:21:39,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,031 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e84bcbb96d2497996a80ee638baa3f2, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732724495166 2024-11-27T16:21:39,031 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f3d3d6ed6554879b8f68e8e29589b69, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732724495166 2024-11-27T16:21:39,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,031 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 50776388d75b4536a2cffefa7246c55a, keycount=150, bloomtype=ROW, 
size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732724497362 2024-11-27T16:21:39,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,031 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c75de37a69944dd9ce66c448466f510, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732724497362 2024-11-27T16:21:39,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,042 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#B#compaction#150 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:39,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,043 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/44caf2ca27f940b9a772be903e5103cb is 50, key is test_row_0/B:col10/1732724497370/Put/seqid=0 2024-11-27T16:21:39,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,045 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:39,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,047 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,050 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,053 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127137799c0997d465baa7167064aba20cd_0eeb0588edb2caebe71f8272627a699d store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:39,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,054 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127137799c0997d465baa7167064aba20cd_0eeb0588edb2caebe71f8272627a699d, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:39,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,055 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127137799c0997d465baa7167064aba20cd_0eeb0588edb2caebe71f8272627a699d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:39,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to 
blk_1073742002_1178 (size=12493) 2024-11-27T16:21:39,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742003_1179 (size=4469) 2024-11-27T16:21:39,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,064 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#A#compaction#151 average throughput is 1.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,065 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/e75689ce70cd4c2282cd09bc44c6da67 is 175, key is test_row_0/A:col10/1732724497370/Put/seqid=0 2024-11-27T16:21:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,067 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/44caf2ca27f940b9a772be903e5103cb as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/44caf2ca27f940b9a772be903e5103cb 2024-11-27T16:21:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,070 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742004_1180 (size=31447) 2024-11-27T16:21:39,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,073 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/B of 0eeb0588edb2caebe71f8272627a699d into 44caf2ca27f940b9a772be903e5103cb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:39,073 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:39,073 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/B, priority=13, startTime=1732724499027; duration=0sec 2024-11-27T16:21:39,073 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:39,073 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:B 2024-11-27T16:21:39,073 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:39,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,074 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:39,075 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/C is initiating minor compaction (all 
files) 2024-11-27T16:21:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,075 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/C in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:39,075 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/c15705e064e546e08cccba7c8051a29c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/41d255ff308440f1b753295c647127dc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/432dd1347b4c46a1b11de9e597ea27c1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=33.3 K 2024-11-27T16:21:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,076 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting c15705e064e546e08cccba7c8051a29c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732724495049 2024-11-27T16:21:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,077 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 41d255ff308440f1b753295c647127dc, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732724495166 2024-11-27T16:21:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,078 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 432dd1347b4c46a1b11de9e597ea27c1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732724497362 2024-11-27T16:21:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,085 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/e75689ce70cd4c2282cd09bc44c6da67 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/e75689ce70cd4c2282cd09bc44c6da67 2024-11-27T16:21:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,089 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#C#compaction#152 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:39,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,089 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/37c6d412b5744deda1f0bbc23e6fb74a is 50, key is test_row_0/C:col10/1732724497370/Put/seqid=0 2024-11-27T16:21:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,092 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/A of 0eeb0588edb2caebe71f8272627a699d into e75689ce70cd4c2282cd09bc44c6da67(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:39,092 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:39,092 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/A, priority=13, startTime=1732724499026; duration=0sec 2024-11-27T16:21:39,092 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:39,093 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:A 2024-11-27T16:21:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46329 is added to blk_1073742005_1181 (size=12493) 2024-11-27T16:21:39,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,102 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/37c6d412b5744deda1f0bbc23e6fb74a as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/37c6d412b5744deda1f0bbc23e6fb74a 2024-11-27T16:21:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,108 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,110 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/C of 0eeb0588edb2caebe71f8272627a699d into 37c6d412b5744deda1f0bbc23e6fb74a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:39,110 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:39,110 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/C, priority=13, startTime=1732724499027; duration=0sec 2024-11-27T16:21:39,110 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:39,110 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:C 2024-11-27T16:21:39,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,132 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:21:39,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:39,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
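The MemStoreFlusher entry above (Flushing 3/3 column families, dataSize=53.67 KB) and the "Over memstore limit=512.0 K" rejections that follow are governed by two region-server settings: the per-region flush threshold and the blocking multiplier, since the blocking limit is flush size times hbase.hregion.memstore.block.multiplier. The values below are only an assumption consistent with the 512 K limit reported in this log (128 K with the default multiplier of 4); they are not taken from the test's actual configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB (the production default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // Block new writes with RegionTooBusyException once the memstore reaches
        // flush.size * multiplier, i.e. 512 KB here, matching the limit reported in this log.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
      }
    }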
2024-11-27T16:21:39,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:39,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:39,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:39,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:39,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:39,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:39,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,169 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270a21df611b4143689e156aa42f16e5b3_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724499132/Put/seqid=0 2024-11-27T16:21:39,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,189 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:39,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:39,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:21:39,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724559206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:21:39,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:39,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724559207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742007_1183 (size=24758) 2024-11-27T16:21:39,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-27T16:21:39,286 INFO [Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-27T16:21:39,288 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:39,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-27T16:21:39,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-27T16:21:39,291 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:39,292 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:39,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:39,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:39,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724559308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:39,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724559315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-27T16:21:39,444 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-27T16:21:39,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:39,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:39,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:39,446 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
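The IOException above is back-pressure rather than a real failure: pid=54, the FlushRegionProcedure spawned by the table-level flush pid=53, finds the region already mid-flush ("NOT flushing ... as already flushing"), reports failure, and the master re-dispatches it until the in-flight MemStoreFlusher flush completes. From a client's point of view this whole dance is hidden behind a single blocking call; a minimal sketch, with the table name taken from the log and everything else assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side flush procedure (the pid=53/pid=54 pattern in the log)
          // reports completion, including any retries against a busy region.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }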
2024-11-27T16:21:39,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:39,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:39,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:39,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724559500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:39,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724559508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:39,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724559511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:39,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724559512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:39,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724559518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-27T16:21:39,599 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-27T16:21:39,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:39,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:39,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:39,600 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
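Each RegionTooBusyException above is returned to the caller of the Mutate RPC and surfaces through Table.put as an IOException; the stock client retries it internally with backoff, so an application normally only sees it once retries are exhausted (possibly wrapped in a retries-exhausted exception). The loop below is a deliberately simplified illustration of that retry pattern, not the test's code; the row, value, and backoff numbers are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);              // the client library usually retries this itself
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) throw e;   // give up after a few rounds
              Thread.sleep(backoffMs);     // crude exponential backoff while the flush drains
              backoffMs *= 2;
            }
          }
        }
      }
    }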
2024-11-27T16:21:39,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:39,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:39,619 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:39,639 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270a21df611b4143689e156aa42f16e5b3_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270a21df611b4143689e156aa42f16e5b3_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:39,641 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/a1bf1757dd28405ca76c7e9b82e8323f, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:39,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/a1bf1757dd28405ca76c7e9b82e8323f is 175, key is test_row_0/A:col10/1732724499132/Put/seqid=0 2024-11-27T16:21:39,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742006_1182 (size=74395) 2024-11-27T16:21:39,753 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-27T16:21:39,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:39,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:39,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:39,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:39,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:39,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:39,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:39,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724559819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:39,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724559822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-27T16:21:39,907 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:39,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-27T16:21:39,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:39,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:39,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:39,908 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:39,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:39,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:40,056 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=178, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/a1bf1757dd28405ca76c7e9b82e8323f 2024-11-27T16:21:40,061 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:40,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-27T16:21:40,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:40,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:40,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:40,063 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:40,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:40,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:40,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/8bb5ad4c995e4ef68e0ba00c226b293e is 50, key is test_row_0/B:col10/1732724499132/Put/seqid=0 2024-11-27T16:21:40,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742008_1184 (size=12151) 2024-11-27T16:21:40,114 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/8bb5ad4c995e4ef68e0ba00c226b293e 2024-11-27T16:21:40,124 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/5e44303fe9a744e9ac247fbd24a6f52c is 50, key is test_row_0/C:col10/1732724499132/Put/seqid=0 2024-11-27T16:21:40,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742009_1185 (size=12151) 2024-11-27T16:21:40,216 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:40,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-27T16:21:40,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:40,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:40,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:40,217 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:40,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:40,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:40,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:40,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724560324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:40,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:40,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724560325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:40,370 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:40,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-27T16:21:40,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:40,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:40,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:40,371 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:40,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:40,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:40,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-27T16:21:40,523 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:40,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-27T16:21:40,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:40,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:40,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:40,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:40,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:40,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:40,537 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/5e44303fe9a744e9ac247fbd24a6f52c 2024-11-27T16:21:40,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/a1bf1757dd28405ca76c7e9b82e8323f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/a1bf1757dd28405ca76c7e9b82e8323f 2024-11-27T16:21:40,550 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/a1bf1757dd28405ca76c7e9b82e8323f, entries=400, sequenceid=178, filesize=72.7 K 2024-11-27T16:21:40,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/8bb5ad4c995e4ef68e0ba00c226b293e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/8bb5ad4c995e4ef68e0ba00c226b293e 2024-11-27T16:21:40,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/8bb5ad4c995e4ef68e0ba00c226b293e, entries=150, sequenceid=178, filesize=11.9 K 2024-11-27T16:21:40,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/5e44303fe9a744e9ac247fbd24a6f52c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/5e44303fe9a744e9ac247fbd24a6f52c 2024-11-27T16:21:40,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/5e44303fe9a744e9ac247fbd24a6f52c, entries=150, sequenceid=178, filesize=11.9 K 2024-11-27T16:21:40,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 0eeb0588edb2caebe71f8272627a699d in 1442ms, sequenceid=178, compaction requested=false 2024-11-27T16:21:40,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:40,676 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:40,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=54 2024-11-27T16:21:40,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:40,677 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T16:21:40,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:40,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:40,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:40,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:40,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:40,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:40,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e0d4e0094ee24404bb0994718e3995c5_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724499200/Put/seqid=0 2024-11-27T16:21:40,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742010_1186 (size=12304) 2024-11-27T16:21:40,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,705 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e0d4e0094ee24404bb0994718e3995c5_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e0d4e0094ee24404bb0994718e3995c5_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:40,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/1f3dea9937904ab19b12c70974b9b4c3, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:40,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/1f3dea9937904ab19b12c70974b9b4c3 is 175, key is test_row_0/A:col10/1732724499200/Put/seqid=0 2024-11-27T16:21:40,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742011_1187 (size=31105) 2024-11-27T16:21:40,728 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=202, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/1f3dea9937904ab19b12c70974b9b4c3 2024-11-27T16:21:40,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/75bc4636c36f4e46a68fa4a815796495 is 50, key is test_row_0/B:col10/1732724499200/Put/seqid=0 2024-11-27T16:21:40,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742012_1188 (size=12151) 2024-11-27T16:21:40,767 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/75bc4636c36f4e46a68fa4a815796495 2024-11-27T16:21:40,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/39cf88e556754f7cbeb810db3b97d36e is 50, key is test_row_0/C:col10/1732724499200/Put/seqid=0 2024-11-27T16:21:40,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742013_1189 (size=12151) 2024-11-27T16:21:40,805 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/39cf88e556754f7cbeb810db3b97d36e 2024-11-27T16:21:40,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/1f3dea9937904ab19b12c70974b9b4c3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/1f3dea9937904ab19b12c70974b9b4c3 2024-11-27T16:21:40,819 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/1f3dea9937904ab19b12c70974b9b4c3, entries=150, sequenceid=202, filesize=30.4 K 2024-11-27T16:21:40,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/75bc4636c36f4e46a68fa4a815796495 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/75bc4636c36f4e46a68fa4a815796495 2024-11-27T16:21:40,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,827 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/75bc4636c36f4e46a68fa4a815796495, entries=150, sequenceid=202, filesize=11.9 K 2024-11-27T16:21:40,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/39cf88e556754f7cbeb810db3b97d36e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/39cf88e556754f7cbeb810db3b97d36e 2024-11-27T16:21:40,829 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,834 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,834 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/39cf88e556754f7cbeb810db3b97d36e, entries=150, sequenceid=202, filesize=11.9 K
2024-11-27T16:21:40,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,835 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 0eeb0588edb2caebe71f8272627a699d in 158ms, sequenceid=202, compaction requested=true
2024-11-27T16:21:40,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d:
2024-11-27T16:21:40,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.
2024-11-27T16:21:40,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54
2024-11-27T16:21:40,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=54
2024-11-27T16:21:40,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,839 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53
2024-11-27T16:21:40,839 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5450 sec
2024-11-27T16:21:40,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,841 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.5510 sec
2024-11-27T16:21:40,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:21:40,842 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,849 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,853 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,858 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,863 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,873 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,879 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,883 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,888 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,896 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,901 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,905 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,910 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:40,914 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,042 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,046 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,053 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,059 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,067 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,074 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,080 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,085 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,088 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,093 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,098 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,102 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,106 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,112 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry, storefiletracker.StoreFileTrackerFactory(122) "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", is logged repeatedly by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=44169) between 2024-11-27T16:21:41,112 and 2024-11-27T16:21:41,214; duplicate entries condensed ...]
2024-11-27T16:21:41,214 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,220 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,228 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,235 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,240 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,245 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,251 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,254 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,260 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,264 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,268 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,271 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,275 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,280 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (this same DEBUG entry is logged several hundred more times between 2024-11-27T16:21:41,280 and 2024-11-27T16:21:41,369 by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 44169) 2024-11-27T16:21:41,370 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,374 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,378 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,382 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,385 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,389 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,392 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,395 DEBUG 
2024-11-27T16:21:41,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53
2024-11-27T16:21:41,395 INFO [Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed
2024-11-27T16:21:41,398 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-27T16:21:41,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees
2024-11-27T16:21:41,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55
2024-11-27T16:21:41,401 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-27T16:21:41,402 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-27T16:21:41,402 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-27T16:21:41,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d
2024-11-27T16:21:41,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-27T16:21:41,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A
2024-11-27T16:21:41,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:41,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B
2024-11-27T16:21:41,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:41,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C
2024-11-27T16:21:41,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:41,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411279116acfc9e15435a906593982e14e41e_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724501426/Put/seqid=0 2024-11-27T16:21:41,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55
2024-11-27T16:21:41,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742014_1190 (size=22268) 2024-11-27T16:21:41,515 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:41,520 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411279116acfc9e15435a906593982e14e41e_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279116acfc9e15435a906593982e14e41e_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:41,521 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/8fb84c3a6a23446395885a2df475f597, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:41,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/8fb84c3a6a23446395885a2df475f597 is 175, key is test_row_0/A:col10/1732724501426/Put/seqid=0 2024-11-27T16:21:41,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742015_1191 (size=65673) 2024-11-27T16:21:41,541 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/8fb84c3a6a23446395885a2df475f597 2024-11-27T16:21:41,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724561538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724561548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724561549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,555 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724561549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:41,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:41,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:41,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:41,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,556 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:41,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724561552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:41,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:41,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/50bdb54d4a7a484b9b11f2705d67f7a0 is 50, key is test_row_0/B:col10/1732724501426/Put/seqid=0 2024-11-27T16:21:41,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742016_1192 (size=12151) 2024-11-27T16:21:41,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724561653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724561656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724561657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724561657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724561657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-27T16:21:41,708 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,709 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:41,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:41,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:41,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:41,709 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:41,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:41,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:41,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724561859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724561859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,861 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,862 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:41,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:41,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:41,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:41,862 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:41,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:41,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:41,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724561862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724561862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:41,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:41,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724561864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-27T16:21:42,015 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:42,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:42,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:42,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,024 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/50bdb54d4a7a484b9b11f2705d67f7a0 2024-11-27T16:21:42,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/7fa3cb27374e45a1af8e3e522cec87d1 is 50, key is test_row_0/C:col10/1732724501426/Put/seqid=0 2024-11-27T16:21:42,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742017_1193 (size=12151) 2024-11-27T16:21:42,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/7fa3cb27374e45a1af8e3e522cec87d1 2024-11-27T16:21:42,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/8fb84c3a6a23446395885a2df475f597 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/8fb84c3a6a23446395885a2df475f597 2024-11-27T16:21:42,109 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/8fb84c3a6a23446395885a2df475f597, entries=350, sequenceid=213, filesize=64.1 K 2024-11-27T16:21:42,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/50bdb54d4a7a484b9b11f2705d67f7a0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/50bdb54d4a7a484b9b11f2705d67f7a0 2024-11-27T16:21:42,115 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/50bdb54d4a7a484b9b11f2705d67f7a0, entries=150, sequenceid=213, filesize=11.9 K 2024-11-27T16:21:42,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/7fa3cb27374e45a1af8e3e522cec87d1 as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/7fa3cb27374e45a1af8e3e522cec87d1 2024-11-27T16:21:42,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/7fa3cb27374e45a1af8e3e522cec87d1, entries=150, sequenceid=213, filesize=11.9 K 2024-11-27T16:21:42,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 0eeb0588edb2caebe71f8272627a699d in 698ms, sequenceid=213, compaction requested=true 2024-11-27T16:21:42,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:42,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:42,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:42,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:42,128 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:42,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:42,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:42,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:42,128 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:42,130 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:42,130 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 202620 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:42,130 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/A is initiating minor compaction (all files) 2024-11-27T16:21:42,130 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/B is initiating minor compaction (all files) 2024-11-27T16:21:42,130 INFO 
[RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/A in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,130 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/B in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,130 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/44caf2ca27f940b9a772be903e5103cb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/8bb5ad4c995e4ef68e0ba00c226b293e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/75bc4636c36f4e46a68fa4a815796495, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/50bdb54d4a7a484b9b11f2705d67f7a0] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=47.8 K 2024-11-27T16:21:42,130 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/e75689ce70cd4c2282cd09bc44c6da67, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/a1bf1757dd28405ca76c7e9b82e8323f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/1f3dea9937904ab19b12c70974b9b4c3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/8fb84c3a6a23446395885a2df475f597] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=197.9 K 2024-11-27T16:21:42,130 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,130 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/e75689ce70cd4c2282cd09bc44c6da67, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/a1bf1757dd28405ca76c7e9b82e8323f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/1f3dea9937904ab19b12c70974b9b4c3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/8fb84c3a6a23446395885a2df475f597] 2024-11-27T16:21:42,131 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 44caf2ca27f940b9a772be903e5103cb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732724497362 2024-11-27T16:21:42,131 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e75689ce70cd4c2282cd09bc44c6da67, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732724497362 2024-11-27T16:21:42,131 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8bb5ad4c995e4ef68e0ba00c226b293e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732724499128 2024-11-27T16:21:42,132 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 75bc4636c36f4e46a68fa4a815796495, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732724499153 2024-11-27T16:21:42,132 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1bf1757dd28405ca76c7e9b82e8323f, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732724498495 2024-11-27T16:21:42,133 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 50bdb54d4a7a484b9b11f2705d67f7a0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732724501409 2024-11-27T16:21:42,133 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f3dea9937904ab19b12c70974b9b4c3, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732724499153 2024-11-27T16:21:42,133 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8fb84c3a6a23446395885a2df475f597, keycount=350, bloomtype=ROW, size=64.1 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732724501368 2024-11-27T16:21:42,146 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#B#compaction#162 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:42,146 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:42,147 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/49bc75baae1d4671b939a8ff7d58e5bd is 50, key is test_row_0/B:col10/1732724501426/Put/seqid=0 2024-11-27T16:21:42,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:42,163 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-27T16:21:42,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:42,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:42,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:42,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:42,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:42,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:42,169 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:42,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:42,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:42,170 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,172 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127ed39c32f2c494f8dbc133a16935af3a2_0eeb0588edb2caebe71f8272627a699d store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:42,176 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127ed39c32f2c494f8dbc133a16935af3a2_0eeb0588edb2caebe71f8272627a699d, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:42,177 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127ed39c32f2c494f8dbc133a16935af3a2_0eeb0588edb2caebe71f8272627a699d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:42,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724562177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724562178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724562182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724562183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724562183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411273bc8afd2a7cb4c77bb56b921c2ff9998_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724501550/Put/seqid=0 2024-11-27T16:21:42,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742018_1194 (size=12629) 2024-11-27T16:21:42,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742020_1196 (size=14794) 2024-11-27T16:21:42,235 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,236 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/49bc75baae1d4671b939a8ff7d58e5bd as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/49bc75baae1d4671b939a8ff7d58e5bd 2024-11-27T16:21:42,240 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411273bc8afd2a7cb4c77bb56b921c2ff9998_0eeb0588edb2caebe71f8272627a699d to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273bc8afd2a7cb4c77bb56b921c2ff9998_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:42,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742019_1195 (size=4469) 2024-11-27T16:21:42,241 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/1f6fc541afdc4655b4f1ca4fb7eb6d65, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:42,242 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/1f6fc541afdc4655b4f1ca4fb7eb6d65 is 175, key is test_row_0/A:col10/1732724501550/Put/seqid=0 2024-11-27T16:21:42,259 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/B of 0eeb0588edb2caebe71f8272627a699d into 49bc75baae1d4671b939a8ff7d58e5bd(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:42,259 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:42,259 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/B, priority=12, startTime=1732724502127; duration=0sec 2024-11-27T16:21:42,259 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:42,259 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:B 2024-11-27T16:21:42,259 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:42,265 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:42,265 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/C is initiating minor compaction (all files) 2024-11-27T16:21:42,265 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/C in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:42,265 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/37c6d412b5744deda1f0bbc23e6fb74a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/5e44303fe9a744e9ac247fbd24a6f52c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/39cf88e556754f7cbeb810db3b97d36e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/7fa3cb27374e45a1af8e3e522cec87d1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=47.8 K 2024-11-27T16:21:42,265 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 37c6d412b5744deda1f0bbc23e6fb74a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1732724497362 2024-11-27T16:21:42,266 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e44303fe9a744e9ac247fbd24a6f52c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1732724499128 2024-11-27T16:21:42,266 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 39cf88e556754f7cbeb810db3b97d36e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732724499153 2024-11-27T16:21:42,267 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fa3cb27374e45a1af8e3e522cec87d1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732724501409 2024-11-27T16:21:42,282 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#C#compaction#165 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:42,282 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/b7bd01a0b3fd44ef901efe6966a78353 is 50, key is test_row_0/C:col10/1732724501426/Put/seqid=0 2024-11-27T16:21:42,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742021_1197 (size=39749) 2024-11-27T16:21:42,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724562285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,286 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=239, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/1f6fc541afdc4655b4f1ca4fb7eb6d65 2024-11-27T16:21:42,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724562285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724562288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724562296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724562296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/0d24befe3d2b4a1fb058b0599d7aa4a8 is 50, key is test_row_0/B:col10/1732724501550/Put/seqid=0 2024-11-27T16:21:42,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742022_1198 (size=12629) 2024-11-27T16:21:42,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742023_1199 (size=12151) 2024-11-27T16:21:42,324 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:42,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:42,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,325 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,478 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:42,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:42,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,479 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724562487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724562489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724562493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724562500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724562501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-27T16:21:42,632 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,632 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:42,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:42,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,633 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,642 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#A#compaction#163 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:42,643 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/82022cf451fb4521b018879e91abafd8 is 175, key is test_row_0/A:col10/1732724501426/Put/seqid=0 2024-11-27T16:21:42,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742024_1200 (size=31583) 2024-11-27T16:21:42,711 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/b7bd01a0b3fd44ef901efe6966a78353 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b7bd01a0b3fd44ef901efe6966a78353 2024-11-27T16:21:42,715 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/0d24befe3d2b4a1fb058b0599d7aa4a8 2024-11-27T16:21:42,718 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/C of 0eeb0588edb2caebe71f8272627a699d into b7bd01a0b3fd44ef901efe6966a78353(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
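The run of RegionTooBusyException entries above is the region server rejecting writes while the memstore of region 0eeb0588edb2caebe71f8272627a699d is over its blocking size; callers are expected to back off until the flush in progress frees space. Below is a minimal Java sketch of an explicit retry around a single Put. The row, column family, qualifier, and backoff values are illustrative assumptions, and in practice the HBase client's built-in retries usually absorb this exception (it may also surface wrapped in a RetriesExhaustedWithDetailsException) before application code ever sees it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionTooBusyRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row and column names are placeholders modelled on the keys visible in the log.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100; // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Memstore above the blocking limit; wait for the flush to catch up, then retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}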
2024-11-27T16:21:42,719 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:42,719 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/C, priority=12, startTime=1732724502128; duration=0sec 2024-11-27T16:21:42,719 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:42,719 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:C 2024-11-27T16:21:42,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/b4bff3ba2a4d42fab6d83be557eb45e3 is 50, key is test_row_0/C:col10/1732724501550/Put/seqid=0 2024-11-27T16:21:42,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742025_1201 (size=12151) 2024-11-27T16:21:42,754 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/b4bff3ba2a4d42fab6d83be557eb45e3 2024-11-27T16:21:42,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/1f6fc541afdc4655b4f1ca4fb7eb6d65 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/1f6fc541afdc4655b4f1ca4fb7eb6d65 2024-11-27T16:21:42,769 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/1f6fc541afdc4655b4f1ca4fb7eb6d65, entries=200, sequenceid=239, filesize=38.8 K 2024-11-27T16:21:42,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/0d24befe3d2b4a1fb058b0599d7aa4a8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/0d24befe3d2b4a1fb058b0599d7aa4a8 2024-11-27T16:21:42,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/0d24befe3d2b4a1fb058b0599d7aa4a8, entries=150, sequenceid=239, filesize=11.9 K 2024-11-27T16:21:42,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,785 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:42,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/b4bff3ba2a4d42fab6d83be557eb45e3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b4bff3ba2a4d42fab6d83be557eb45e3 2024-11-27T16:21:42,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:42,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,787 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
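The repeated pid=56 failures above ("Unable to complete flush ... as already flushing") are the master's flush procedure being redispatched to the region server until the memstore flush already running on 0eeb0588edb2caebe71f8272627a699d completes. From a client, the same table flush is normally requested through the Admin API; a minimal sketch, assuming the cluster configuration is on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush every region of the table. The master drives one
      // flush procedure per region and, as the log shows for pid=56, keeps retrying
      // a region that is still busy with an earlier flush.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}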
2024-11-27T16:21:42,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:42,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
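The "Over memstore limit=512.0 K" figure in the warnings above and below is the region's blocking memstore size, which HBase computes as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the test apparently runs with a much smaller flush size than the usual 128 MB default so the limit is reached quickly. A hedged configuration sketch follows; the concrete numbers are assumptions chosen only to reproduce a 512 K bound, not values read from the test setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Blocking size = flush size * block multiplier.
    // 128 K * 4 = 512 K would match the "Over memstore limit=512.0 K" warnings,
    // but these exact values are an assumption, not taken from the test.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingSize = conf.getLong("hbase.hregion.memstore.flush.size", 128 * 1024 * 1024L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore size: " + blockingSize + " bytes");
  }
}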
2024-11-27T16:21:42,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724562793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724562793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b4bff3ba2a4d42fab6d83be557eb45e3, entries=150, sequenceid=239, filesize=11.9 K 2024-11-27T16:21:42,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 0eeb0588edb2caebe71f8272627a699d in 636ms, sequenceid=239, compaction requested=false 2024-11-27T16:21:42,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:42,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:42,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:21:42,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:42,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:42,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:42,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:42,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:42,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:42,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,831 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127549c59aa3e5f469a8933dec924295282_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724502181/Put/seqid=0 2024-11-27T16:21:42,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T16:21:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T16:21:42,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T16:21:42,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724562856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724562858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:42,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724562862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742026_1202 (size=12304) 2024-11-27T16:21:42,939 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:42,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:42,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:42,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:42,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:42,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724562964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724562964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:42,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:42,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724562969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,058 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/82022cf451fb4521b018879e91abafd8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/82022cf451fb4521b018879e91abafd8 2024-11-27T16:21:43,068 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/A of 0eeb0588edb2caebe71f8272627a699d into 82022cf451fb4521b018879e91abafd8(size=30.8 K), total size for store is 69.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:43,068 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:43,068 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/A, priority=12, startTime=1732724502127; duration=0sec 2024-11-27T16:21:43,068 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:43,068 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:A 2024-11-27T16:21:43,093 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:43,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:43,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:43,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:43,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:43,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:43,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724563167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:43,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724563169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:43,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724563172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,247 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:43,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:43,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:43,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:43,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,271 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:43,276 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127549c59aa3e5f469a8933dec924295282_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127549c59aa3e5f469a8933dec924295282_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:43,277 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/c6923370c87145e9965cff9758c379bc, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:43,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/c6923370c87145e9965cff9758c379bc is 175, key is test_row_0/A:col10/1732724502181/Put/seqid=0 2024-11-27T16:21:43,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742027_1203 (size=31101) 2024-11-27T16:21:43,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:43,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724563299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:43,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724563300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,400 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,400 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:43,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:43,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:43,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:43,401 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:43,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:43,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724563470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:43,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724563472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:43,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724563476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-27T16:21:43,553 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:43,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:43,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:43,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:43,554 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,688 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/c6923370c87145e9965cff9758c379bc 2024-11-27T16:21:43,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/3963b05c221e4790a12833cddfb5f1c2 is 50, key is test_row_0/B:col10/1732724502181/Put/seqid=0 2024-11-27T16:21:43,707 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:43,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:43,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:43,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:43,708 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742028_1204 (size=9757) 2024-11-27T16:21:43,722 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/3963b05c221e4790a12833cddfb5f1c2 2024-11-27T16:21:43,729 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/29da542e398046f5b7d5375b9d2fdfae is 50, key is test_row_0/C:col10/1732724502181/Put/seqid=0 2024-11-27T16:21:43,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742029_1205 (size=9757) 2024-11-27T16:21:43,861 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:43,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:43,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:43,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:43,861 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:43,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:43,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724563974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:43,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724563975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:43,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:43,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724563982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,014 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:44,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:44,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,167 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/29da542e398046f5b7d5375b9d2fdfae 2024-11-27T16:21:44,169 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:44,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:44,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,170 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:44,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/c6923370c87145e9965cff9758c379bc as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/c6923370c87145e9965cff9758c379bc 2024-11-27T16:21:44,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/c6923370c87145e9965cff9758c379bc, entries=150, sequenceid=253, filesize=30.4 K 2024-11-27T16:21:44,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/3963b05c221e4790a12833cddfb5f1c2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/3963b05c221e4790a12833cddfb5f1c2 2024-11-27T16:21:44,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/3963b05c221e4790a12833cddfb5f1c2, entries=100, sequenceid=253, filesize=9.5 K 2024-11-27T16:21:44,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/29da542e398046f5b7d5375b9d2fdfae as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/29da542e398046f5b7d5375b9d2fdfae 2024-11-27T16:21:44,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/29da542e398046f5b7d5375b9d2fdfae, entries=100, sequenceid=253, filesize=9.5 K 2024-11-27T16:21:44,193 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 0eeb0588edb2caebe71f8272627a699d in 1389ms, sequenceid=253, compaction requested=true 2024-11-27T16:21:44,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:44,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:44,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:44,194 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:44,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:21:44,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:44,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-27T16:21:44,194 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:44,194 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:44,195 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102433 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:44,195 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/A is initiating minor compaction (all files) 2024-11-27T16:21:44,195 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34537 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:44,195 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/A in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,195 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/C is initiating minor compaction (all files) 2024-11-27T16:21:44,195 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/C in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:44,195 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/82022cf451fb4521b018879e91abafd8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/1f6fc541afdc4655b4f1ca4fb7eb6d65, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/c6923370c87145e9965cff9758c379bc] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=100.0 K 2024-11-27T16:21:44,195 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b7bd01a0b3fd44ef901efe6966a78353, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b4bff3ba2a4d42fab6d83be557eb45e3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/29da542e398046f5b7d5375b9d2fdfae] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=33.7 K 2024-11-27T16:21:44,196 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,196 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/82022cf451fb4521b018879e91abafd8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/1f6fc541afdc4655b4f1ca4fb7eb6d65, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/c6923370c87145e9965cff9758c379bc] 2024-11-27T16:21:44,196 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82022cf451fb4521b018879e91abafd8, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732724501409 2024-11-27T16:21:44,196 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b7bd01a0b3fd44ef901efe6966a78353, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732724501409 2024-11-27T16:21:44,197 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f6fc541afdc4655b4f1ca4fb7eb6d65, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732724501538 2024-11-27T16:21:44,197 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6923370c87145e9965cff9758c379bc, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732724502181 2024-11-27T16:21:44,197 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b4bff3ba2a4d42fab6d83be557eb45e3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732724501539 2024-11-27T16:21:44,198 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 29da542e398046f5b7d5375b9d2fdfae, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732724502181 2024-11-27T16:21:44,221 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:44,222 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#C#compaction#171 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:44,222 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/42f1c5395a2548e6b73a11d4c7663cbe is 50, key is test_row_0/C:col10/1732724502181/Put/seqid=0 2024-11-27T16:21:44,224 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411279d95306222774533b012b93a30b73ff4_0eeb0588edb2caebe71f8272627a699d store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:44,225 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411279d95306222774533b012b93a30b73ff4_0eeb0588edb2caebe71f8272627a699d, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:44,226 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411279d95306222774533b012b93a30b73ff4_0eeb0588edb2caebe71f8272627a699d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:44,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742031_1207 (size=4469) 2024-11-27T16:21:44,254 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#A#compaction#172 average throughput is 0.76 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:44,254 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/47028818db9d4134be8c4aeb48be71b0 is 175, key is test_row_0/A:col10/1732724502181/Put/seqid=0 2024-11-27T16:21:44,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742030_1206 (size=12731) 2024-11-27T16:21:44,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742032_1208 (size=31792) 2024-11-27T16:21:44,266 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/47028818db9d4134be8c4aeb48be71b0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/47028818db9d4134be8c4aeb48be71b0 2024-11-27T16:21:44,272 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/42f1c5395a2548e6b73a11d4c7663cbe as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/42f1c5395a2548e6b73a11d4c7663cbe 2024-11-27T16:21:44,284 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/A of 0eeb0588edb2caebe71f8272627a699d into 47028818db9d4134be8c4aeb48be71b0(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:44,284 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:44,285 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/A, priority=13, startTime=1732724504193; duration=0sec 2024-11-27T16:21:44,285 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:44,285 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:A 2024-11-27T16:21:44,285 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:44,286 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/C of 0eeb0588edb2caebe71f8272627a699d into 42f1c5395a2548e6b73a11d4c7663cbe(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:44,286 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:44,286 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/C, priority=13, startTime=1732724504194; duration=0sec 2024-11-27T16:21:44,286 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:44,286 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:C 2024-11-27T16:21:44,287 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34537 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:44,287 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/B is initiating minor compaction (all files) 2024-11-27T16:21:44,287 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/B in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:44,287 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/49bc75baae1d4671b939a8ff7d58e5bd, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/0d24befe3d2b4a1fb058b0599d7aa4a8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/3963b05c221e4790a12833cddfb5f1c2] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=33.7 K 2024-11-27T16:21:44,288 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49bc75baae1d4671b939a8ff7d58e5bd, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732724501409 2024-11-27T16:21:44,289 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d24befe3d2b4a1fb058b0599d7aa4a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732724501539 2024-11-27T16:21:44,289 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3963b05c221e4790a12833cddfb5f1c2, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732724502181 2024-11-27T16:21:44,306 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#B#compaction#173 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:44,306 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/11bd5b71690c4ec69e8dba809fde796b is 50, key is test_row_0/B:col10/1732724502181/Put/seqid=0 2024-11-27T16:21:44,308 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:21:44,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:44,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:44,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:44,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:44,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:44,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:44,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:44,323 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:44,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:44,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742033_1209 (size=12731) 2024-11-27T16:21:44,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:44,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724564324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:44,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724564325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e3180e6c5ed646aeb0c5f5a5511892f8_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724504306/Put/seqid=0 2024-11-27T16:21:44,345 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/11bd5b71690c4ec69e8dba809fde796b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/11bd5b71690c4ec69e8dba809fde796b 2024-11-27T16:21:44,351 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/B of 0eeb0588edb2caebe71f8272627a699d into 11bd5b71690c4ec69e8dba809fde796b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:44,351 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:44,351 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/B, priority=13, startTime=1732724504194; duration=0sec 2024-11-27T16:21:44,351 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:44,352 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:B 2024-11-27T16:21:44,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742034_1210 (size=12454) 2024-11-27T16:21:44,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:44,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724564429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:44,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724564430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,476 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:44,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:44,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,477 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:44,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,629 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,630 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:44,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:44,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,630 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:44,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724564632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:44,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724564632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,754 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:44,758 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e3180e6c5ed646aeb0c5f5a5511892f8_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e3180e6c5ed646aeb0c5f5a5511892f8_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:44,760 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/c87ad64e65644fffb7eda39027faf9e5, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:44,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/c87ad64e65644fffb7eda39027faf9e5 is 175, key is test_row_0/A:col10/1732724504306/Put/seqid=0 2024-11-27T16:21:44,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742035_1211 (size=31255) 2024-11-27T16:21:44,782 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:44,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
as already flushing 2024-11-27T16:21:44,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,783 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,935 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:44,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:44,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:44,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:44,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:44,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724564935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:44,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724564934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:44,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724564978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:44,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724564979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:44,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:44,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724564992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:45,088 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:45,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:45,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:45,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:45,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:45,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:45,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:45,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:45,169 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=281, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/c87ad64e65644fffb7eda39027faf9e5 2024-11-27T16:21:45,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/6ce4e5d80b5e4d29af72bb9bccad87fb is 50, key is test_row_0/B:col10/1732724504306/Put/seqid=0 2024-11-27T16:21:45,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742036_1212 (size=12301) 2024-11-27T16:21:45,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/6ce4e5d80b5e4d29af72bb9bccad87fb 2024-11-27T16:21:45,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/6c4a37c215b44ba29e4154f48ecd435b is 50, key is test_row_0/C:col10/1732724504306/Put/seqid=0 2024-11-27T16:21:45,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742037_1213 (size=12301) 2024-11-27T16:21:45,219 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=281 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/6c4a37c215b44ba29e4154f48ecd435b 2024-11-27T16:21:45,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/c87ad64e65644fffb7eda39027faf9e5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/c87ad64e65644fffb7eda39027faf9e5 2024-11-27T16:21:45,230 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/c87ad64e65644fffb7eda39027faf9e5, entries=150, sequenceid=281, filesize=30.5 K 2024-11-27T16:21:45,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/6ce4e5d80b5e4d29af72bb9bccad87fb as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6ce4e5d80b5e4d29af72bb9bccad87fb 2024-11-27T16:21:45,238 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6ce4e5d80b5e4d29af72bb9bccad87fb, entries=150, sequenceid=281, filesize=12.0 K 2024-11-27T16:21:45,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/6c4a37c215b44ba29e4154f48ecd435b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/6c4a37c215b44ba29e4154f48ecd435b 2024-11-27T16:21:45,241 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:45,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:45,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:45,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:45,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
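The RegionTooBusyException entries running through this stretch of the log are the region server refusing client Mutate calls while region 0eeb0588edb2caebe71f8272627a699d sits above its 512.0 K memstore blocking limit and the in-flight flush has not yet drained it. The HBase client typically retries this exception on its own; the sketch below is only a minimal illustration of how caller code could also back off and retry a single Put, assuming a standard HBase 2.x client connection to the TestAcidGuarantees table and assuming the exception reaches the caller unwrapped. putWithBackoff and its retry parameters are hypothetical names chosen for the example, not anything taken from this test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionTooBusyRetryExample {

    // Hypothetical helper: retry one Put with exponential backoff while the region
    // keeps answering "Over memstore limit".
    static void putWithBackoff(Table table, Put put, int maxAttempts)
            throws IOException, InterruptedException {
        long sleepMs = 100L;
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException busy) {
                if (attempt >= maxAttempts) {
                    throw busy;                        // still blocked after maxAttempts tries
                }
                Thread.sleep(sleepMs);                 // give the flush time to drain the memstore
                sleepMs = Math.min(sleepMs * 2, 5000L);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            putWithBackoff(table, put, 10);
        }
    }
}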
2024-11-27T16:21:45,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:45,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:45,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:45,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/6c4a37c215b44ba29e4154f48ecd435b, entries=150, sequenceid=281, filesize=12.0 K 2024-11-27T16:21:45,249 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 0eeb0588edb2caebe71f8272627a699d in 941ms, sequenceid=281, compaction requested=false 2024-11-27T16:21:45,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:45,395 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:45,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T16:21:45,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
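Two things are interleaved in the entries above: the master keeps re-dispatching FlushRegionCallable for pid=56, and the region server keeps answering "NOT flushing ... as already flushing" until the MemStoreFlusher run started earlier has completed; only then does the procedure's own flush go ahead in the entries that follow. The 512.0 K figure quoted in the rejections is the region's memstore blocking limit, which is conventionally the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier. The snippet below just shows that arithmetic with assumed test-sized values (128 KB x 4); only the 512 K product comes from this log, the individual settings are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-sized values; the shipped defaults are 128 MB and 4.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // 128 KB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        // 131072 * 4 = 524288 bytes = 512.0 K, the limit quoted in the
        // RegionTooBusyException messages in this log.
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
    }
}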
2024-11-27T16:21:45,395 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:21:45,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:45,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:45,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:45,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:45,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:45,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:45,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127b7f280e6209948da9ba3614bf9262889_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724504318/Put/seqid=0 2024-11-27T16:21:45,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742038_1214 (size=12454) 2024-11-27T16:21:45,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:45,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:45,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:45,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724565481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:45,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:45,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724565484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:45,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-27T16:21:45,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:45,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724565585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:45,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:45,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724565587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:45,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:45,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724565788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:45,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:45,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724565793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:45,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:45,814 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127b7f280e6209948da9ba3614bf9262889_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b7f280e6209948da9ba3614bf9262889_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:45,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/6dcd6eeea4514af79590dbcbe02fe48b, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:45,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/6dcd6eeea4514af79590dbcbe02fe48b is 175, key is test_row_0/A:col10/1732724504318/Put/seqid=0 2024-11-27T16:21:45,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742039_1215 (size=31255) 2024-11-27T16:21:45,825 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/6dcd6eeea4514af79590dbcbe02fe48b 2024-11-27T16:21:45,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/dcd75ebf9b234721b7b7b22e926a73cf is 50, key is test_row_0/B:col10/1732724504318/Put/seqid=0 2024-11-27T16:21:45,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742040_1216 (size=12301) 2024-11-27T16:21:45,847 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/dcd75ebf9b234721b7b7b22e926a73cf 2024-11-27T16:21:45,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/f4550b3123ee4c7db257d066c11bc4a9 is 50, key is test_row_0/C:col10/1732724504318/Put/seqid=0 2024-11-27T16:21:45,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742041_1217 (size=12301) 2024-11-27T16:21:45,868 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/f4550b3123ee4c7db257d066c11bc4a9 2024-11-27T16:21:45,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/6dcd6eeea4514af79590dbcbe02fe48b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/6dcd6eeea4514af79590dbcbe02fe48b 2024-11-27T16:21:45,878 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/6dcd6eeea4514af79590dbcbe02fe48b, entries=150, sequenceid=293, filesize=30.5 K 2024-11-27T16:21:45,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/dcd75ebf9b234721b7b7b22e926a73cf as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/dcd75ebf9b234721b7b7b22e926a73cf 2024-11-27T16:21:45,888 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/dcd75ebf9b234721b7b7b22e926a73cf, entries=150, sequenceid=293, filesize=12.0 K 2024-11-27T16:21:45,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/f4550b3123ee4c7db257d066c11bc4a9 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/f4550b3123ee4c7db257d066c11bc4a9 2024-11-27T16:21:45,906 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/f4550b3123ee4c7db257d066c11bc4a9, entries=150, sequenceid=293, filesize=12.0 K 2024-11-27T16:21:45,908 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 0eeb0588edb2caebe71f8272627a699d in 513ms, sequenceid=293, compaction requested=true 2024-11-27T16:21:45,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:45,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
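The entries above trace a complete memstore flush for region 0eeb0588edb2caebe71f8272627a699d: each column family (A, B, C) is written to a .tmp HFile, committed into its store directory, and the flush finishes with compaction requested. Such a flush can also be requested explicitly from a client; the following is a minimal sketch only, assuming an HBase 2.x client on the classpath (the class name is hypothetical, the table name is taken from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Ask the region servers to flush the table's memstores to HFiles,
          // the same work the flush procedures above perform per column family.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }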
2024-11-27T16:21:45,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-27T16:21:45,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-27T16:21:45,911 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-27T16:21:45,911 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.5080 sec 2024-11-27T16:21:45,914 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 4.5140 sec 2024-11-27T16:21:46,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:46,095 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-27T16:21:46,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:46,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:46,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:46,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:46,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:46,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:46,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127dcf983f2b83e4b57a743638fea08831d_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724506094/Put/seqid=0 2024-11-27T16:21:46,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742042_1218 (size=14994) 2024-11-27T16:21:46,110 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:46,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:46,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724566114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:46,115 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127dcf983f2b83e4b57a743638fea08831d_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127dcf983f2b83e4b57a743638fea08831d_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:46,116 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/f67fceacab1843f190a2254649f809aa, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:46,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/f67fceacab1843f190a2254649f809aa is 175, key is test_row_0/A:col10/1732724506094/Put/seqid=0 2024-11-27T16:21:46,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:46,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724566115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:46,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742043_1219 (size=39949) 2024-11-27T16:21:46,153 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=319, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/f67fceacab1843f190a2254649f809aa 2024-11-27T16:21:46,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/66a4b15e90f74765b46ce1d63282ce44 is 50, key is test_row_0/B:col10/1732724506094/Put/seqid=0 2024-11-27T16:21:46,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742044_1220 (size=12301) 2024-11-27T16:21:46,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/66a4b15e90f74765b46ce1d63282ce44 2024-11-27T16:21:46,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/4b7506b8c18d4d08b5a29393adfe432f is 50, key is test_row_0/C:col10/1732724506094/Put/seqid=0 2024-11-27T16:21:46,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742045_1221 (size=12301) 2024-11-27T16:21:46,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:46,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724566216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:46,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:46,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724566220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:46,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:46,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724566417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:46,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:46,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724566424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:46,597 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/4b7506b8c18d4d08b5a29393adfe432f 2024-11-27T16:21:46,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/f67fceacab1843f190a2254649f809aa as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/f67fceacab1843f190a2254649f809aa 2024-11-27T16:21:46,615 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/f67fceacab1843f190a2254649f809aa, entries=200, sequenceid=319, filesize=39.0 K 2024-11-27T16:21:46,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/66a4b15e90f74765b46ce1d63282ce44 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/66a4b15e90f74765b46ce1d63282ce44 2024-11-27T16:21:46,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/66a4b15e90f74765b46ce1d63282ce44, entries=150, sequenceid=319, filesize=12.0 K 2024-11-27T16:21:46,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/4b7506b8c18d4d08b5a29393adfe432f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/4b7506b8c18d4d08b5a29393adfe432f 2024-11-27T16:21:46,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/4b7506b8c18d4d08b5a29393adfe432f, entries=150, sequenceid=319, filesize=12.0 K 2024-11-27T16:21:46,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 0eeb0588edb2caebe71f8272627a699d in 548ms, sequenceid=319, compaction requested=true 2024-11-27T16:21:46,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:46,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:46,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:46,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:46,644 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:46,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:46,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:46,644 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:46,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:46,646 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134251 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:46,646 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/A is initiating minor compaction (all files) 2024-11-27T16:21:46,646 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/A in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:46,646 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/47028818db9d4134be8c4aeb48be71b0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/c87ad64e65644fffb7eda39027faf9e5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/6dcd6eeea4514af79590dbcbe02fe48b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/f67fceacab1843f190a2254649f809aa] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=131.1 K 2024-11-27T16:21:46,646 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:46,646 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/47028818db9d4134be8c4aeb48be71b0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/c87ad64e65644fffb7eda39027faf9e5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/6dcd6eeea4514af79590dbcbe02fe48b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/f67fceacab1843f190a2254649f809aa] 2024-11-27T16:21:46,647 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47028818db9d4134be8c4aeb48be71b0, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732724501539 2024-11-27T16:21:46,648 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c87ad64e65644fffb7eda39027faf9e5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1732724502833 2024-11-27T16:21:46,648 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:46,648 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/B is initiating minor compaction (all files) 2024-11-27T16:21:46,648 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/B in 
TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:46,649 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/11bd5b71690c4ec69e8dba809fde796b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6ce4e5d80b5e4d29af72bb9bccad87fb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/dcd75ebf9b234721b7b7b22e926a73cf, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/66a4b15e90f74765b46ce1d63282ce44] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=48.5 K 2024-11-27T16:21:46,649 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6dcd6eeea4514af79590dbcbe02fe48b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732724504314 2024-11-27T16:21:46,649 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 11bd5b71690c4ec69e8dba809fde796b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732724501539 2024-11-27T16:21:46,650 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ce4e5d80b5e4d29af72bb9bccad87fb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1732724502833 2024-11-27T16:21:46,650 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f67fceacab1843f190a2254649f809aa, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732724505477 2024-11-27T16:21:46,651 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting dcd75ebf9b234721b7b7b22e926a73cf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732724504314 2024-11-27T16:21:46,651 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 66a4b15e90f74765b46ce1d63282ce44, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732724505477 2024-11-27T16:21:46,660 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:46,665 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#B#compaction#184 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:46,665 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/05dc970bf774409aaaab085dd0275489 is 50, key is test_row_0/B:col10/1732724506094/Put/seqid=0 2024-11-27T16:21:46,668 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127bf0c5629431741ecb785ab7280592496_0eeb0588edb2caebe71f8272627a699d store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:46,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742046_1222 (size=13017) 2024-11-27T16:21:46,671 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127bf0c5629431741ecb785ab7280592496_0eeb0588edb2caebe71f8272627a699d, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:46,671 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127bf0c5629431741ecb785ab7280592496_0eeb0588edb2caebe71f8272627a699d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:46,678 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/05dc970bf774409aaaab085dd0275489 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/05dc970bf774409aaaab085dd0275489 2024-11-27T16:21:46,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742047_1223 (size=4469) 2024-11-27T16:21:46,683 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/B of 0eeb0588edb2caebe71f8272627a699d into 05dc970bf774409aaaab085dd0275489(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
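The compaction just completed above was system-requested by the flusher: ExploringCompactionPolicy selected four store files for family B, the throughput controller capped the merge at 50.00 MB/second, and the result is a single 12.7 K file. A compaction can also be requested and watched from a client; a minimal sketch, assuming an HBase 2.x client (class name hypothetical; table and family names taken from the log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactionExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          // Request a compaction of column family B; the region server's policy
          // decides which store files to merge, as in the selection logged above.
          admin.compact(table, Bytes.toBytes("B"));
          // Poll the server-reported compaction state until it finishes.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(1000L);
          }
        }
      }
    }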
2024-11-27T16:21:46,683 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:46,683 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/B, priority=12, startTime=1732724506644; duration=0sec 2024-11-27T16:21:46,683 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:46,683 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:B 2024-11-27T16:21:46,684 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:21:46,686 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#A#compaction#183 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:46,687 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/638af244aa854b2ca178dfc9ae1e1def is 175, key is test_row_0/A:col10/1732724506094/Put/seqid=0 2024-11-27T16:21:46,688 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:21:46,688 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/C is initiating minor compaction (all files) 2024-11-27T16:21:46,688 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/C in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
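The recurring RegionTooBusyException warnings in this section are server-side back-pressure: HRegion.checkResources rejects mutations while the region's memstore is over its 512.0 K blocking limit, and the client's RpcRetryingCallerImpl (seen further below with tries=6, retries=16) keeps retrying with backoff until a flush frees space. The following is a minimal client-side sketch, assuming an HBase 2.x client; the retry values, cell value, and class name are illustrative, while the table, row, family, and qualifier come from the log:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackpressureExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative retry settings (not the test's actual values); the client
        // already retries RegionTooBusyException with backoff, as the log shows.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100L); // base pause in ms, scaled by the backoff table
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          try {
            table.put(put);
          } catch (IOException e) {
            // Surfaces only after all retries are exhausted, typically with the last
            // RegionTooBusyException as the cause; back off or reduce the write rate here.
            System.err.println("write rejected after retries: " + e);
          }
        }
      }
    }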
2024-11-27T16:21:46,688 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/42f1c5395a2548e6b73a11d4c7663cbe, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/6c4a37c215b44ba29e4154f48ecd435b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/f4550b3123ee4c7db257d066c11bc4a9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/4b7506b8c18d4d08b5a29393adfe432f] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=48.5 K 2024-11-27T16:21:46,689 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 42f1c5395a2548e6b73a11d4c7663cbe, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732724501539 2024-11-27T16:21:46,690 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c4a37c215b44ba29e4154f48ecd435b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1732724502833 2024-11-27T16:21:46,690 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f4550b3123ee4c7db257d066c11bc4a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732724504314 2024-11-27T16:21:46,691 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b7506b8c18d4d08b5a29393adfe432f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732724505477 2024-11-27T16:21:46,706 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#C#compaction#185 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:46,707 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/53e377fd583f4e4b854aae22103161a9 is 50, key is test_row_0/C:col10/1732724506094/Put/seqid=0 2024-11-27T16:21:46,725 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:21:46,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:46,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:46,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:46,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:46,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:46,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:46,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:46,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742048_1224 (size=31971) 2024-11-27T16:21:46,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112713d7ae4d23824f72a917b7d499ee2401_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724506112/Put/seqid=0 2024-11-27T16:21:46,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742049_1225 (size=13017) 2024-11-27T16:21:46,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742050_1226 (size=14994) 2024-11-27T16:21:46,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:46,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724566770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:46,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:46,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724566773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:46,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:46,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724566874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:46,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:46,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724566875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:46,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:46,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724566987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:46,992 DEBUG [Thread-679 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4133 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:21:46,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:46,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724566995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:46,999 DEBUG [Thread-673 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:21:47,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:47,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724567011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:47,015 DEBUG [Thread-677 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:21:47,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:47,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724567076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:47,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:47,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724567079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:47,146 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/638af244aa854b2ca178dfc9ae1e1def as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/638af244aa854b2ca178dfc9ae1e1def 2024-11-27T16:21:47,149 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:47,159 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112713d7ae4d23824f72a917b7d499ee2401_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112713d7ae4d23824f72a917b7d499ee2401_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:47,161 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/841af90e0c344b2a929b5a4fb85b7b4b, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:47,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/841af90e0c344b2a929b5a4fb85b7b4b is 175, key is test_row_0/A:col10/1732724506112/Put/seqid=0 2024-11-27T16:21:47,163 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/A of 0eeb0588edb2caebe71f8272627a699d into 638af244aa854b2ca178dfc9ae1e1def(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
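Aside on the write path behind the RegionTooBusyException traces above: every one of them bottoms out in the same client call chain (HTable.put invoked from AcidGuaranteesTestTool$AtomicityWriter.doAnAction). The sketch below is illustrative only and is not the actual test tool source; it shows a minimal writer against the 'TestAcidGuarantees' table with the column families A, B and C and the row key test_row_0 seen in the log. The qualifier and value are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicityWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // One row mutation spanning the three families seen in the log (A, B, C).
            Put put = new Put(Bytes.toBytes("test_row_0"));
            byte[] qualifier = Bytes.toBytes("col10");   // placeholder qualifier
            byte[] value = Bytes.toBytes("some-value");  // placeholder value
            put.addColumn(Bytes.toBytes("A"), qualifier, value);
            put.addColumn(Bytes.toBytes("B"), qualifier, value);
            put.addColumn(Bytes.toBytes("C"), qualifier, value);
            // Table.put() retries internally (RpcRetryingCallerImpl). While the region's
            // memstore is over its blocking limit, each attempt is answered with
            // RegionTooBusyException, which is what the retry messages above record.
            table.put(put);
        }
    }
}

Once the retry budget is exhausted, the put fails with an IOException carrying the last RegionTooBusyException, matching the "details=row 'test_row_0' on table 'TestAcidGuarantees'" messages in the log.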
2024-11-27T16:21:47,163 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:47,163 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/A, priority=12, startTime=1732724506644; duration=0sec 2024-11-27T16:21:47,163 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:47,163 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:A 2024-11-27T16:21:47,163 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/53e377fd583f4e4b854aae22103161a9 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/53e377fd583f4e4b854aae22103161a9 2024-11-27T16:21:47,171 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/C of 0eeb0588edb2caebe71f8272627a699d into 53e377fd583f4e4b854aae22103161a9(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:47,171 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:47,171 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/C, priority=12, startTime=1732724506644; duration=0sec 2024-11-27T16:21:47,171 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:47,171 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:C 2024-11-27T16:21:47,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742051_1227 (size=39949) 2024-11-27T16:21:47,180 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=331, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/841af90e0c344b2a929b5a4fb85b7b4b 2024-11-27T16:21:47,193 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/d7e5f711e45a4dbd9c32dedb69bf3f15 is 50, key is test_row_0/B:col10/1732724506112/Put/seqid=0 
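For context on the numbers in these entries: the blocking threshold reported as "Over memstore limit=512.0 K" and the client retry budget ("retries=16") come from configuration, but the values actually in effect for this run are not visible in the log. The sketch below names the relevant knobs with illustrative values only; the flush size and block multiplier are assumptions chosen merely so that their product reproduces a 512 KB blocking limit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstorePressureConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Server side: HRegion.checkResources rejects writes with RegionTooBusyException
        // once the region's memstore exceeds flush size times block multiplier.
        // 128 KB * 4 = 512 KB matches the "Over memstore limit=512.0 K" messages,
        // but these particular numbers are illustrative, not taken from the test.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        // Client side: how long RpcRetryingCallerImpl keeps retrying before giving up.
        // The log shows "tries=6, retries=16"; the exact settings for this run are unknown.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100L); // pause between retries, in milliseconds

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("memstore blocking limit (bytes): " + blockingLimit);
    }
}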
2024-11-27T16:21:47,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742052_1228 (size=12301) 2024-11-27T16:21:47,204 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/d7e5f711e45a4dbd9c32dedb69bf3f15 2024-11-27T16:21:47,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/170861ebad7846fd8993bbbe3f30b1ba is 50, key is test_row_0/C:col10/1732724506112/Put/seqid=0 2024-11-27T16:21:47,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742053_1229 (size=12301) 2024-11-27T16:21:47,220 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/170861ebad7846fd8993bbbe3f30b1ba 2024-11-27T16:21:47,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/841af90e0c344b2a929b5a4fb85b7b4b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/841af90e0c344b2a929b5a4fb85b7b4b 2024-11-27T16:21:47,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/841af90e0c344b2a929b5a4fb85b7b4b, entries=200, sequenceid=331, filesize=39.0 K 2024-11-27T16:21:47,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/d7e5f711e45a4dbd9c32dedb69bf3f15 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/d7e5f711e45a4dbd9c32dedb69bf3f15 2024-11-27T16:21:47,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/d7e5f711e45a4dbd9c32dedb69bf3f15, entries=150, sequenceid=331, filesize=12.0 K 2024-11-27T16:21:47,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/170861ebad7846fd8993bbbe3f30b1ba as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/170861ebad7846fd8993bbbe3f30b1ba 2024-11-27T16:21:47,253 INFO [MemStoreFlusher.0 
{}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/170861ebad7846fd8993bbbe3f30b1ba, entries=150, sequenceid=331, filesize=12.0 K 2024-11-27T16:21:47,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 0eeb0588edb2caebe71f8272627a699d in 530ms, sequenceid=331, compaction requested=false 2024-11-27T16:21:47,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:47,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:47,383 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-27T16:21:47,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:47,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:47,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:47,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:47,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:47,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:47,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275d7195e73ff746b48cf94908f7447f4d_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724506769/Put/seqid=0 2024-11-27T16:21:47,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:47,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724567397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:47,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:47,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724567398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:47,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742054_1230 (size=12454) 2024-11-27T16:21:47,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:47,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724567502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:47,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:47,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724567507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:47,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:47,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724567705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:47,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:47,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724567708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:47,807 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:47,811 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275d7195e73ff746b48cf94908f7447f4d_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275d7195e73ff746b48cf94908f7447f4d_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:47,812 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/ed7a61b731ee4e98b93622672cdb02aa, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:47,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/ed7a61b731ee4e98b93622672cdb02aa is 175, key is test_row_0/A:col10/1732724506769/Put/seqid=0 2024-11-27T16:21:47,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742055_1231 (size=31255) 2024-11-27T16:21:48,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:48,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724568010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:48,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:48,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724568013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:48,218 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=359, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/ed7a61b731ee4e98b93622672cdb02aa 2024-11-27T16:21:48,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/ddc0db6e1cd0401b9bf17c4bd038eea1 is 50, key is test_row_0/B:col10/1732724506769/Put/seqid=0 2024-11-27T16:21:48,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742056_1232 (size=12301) 2024-11-27T16:21:48,236 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/ddc0db6e1cd0401b9bf17c4bd038eea1 2024-11-27T16:21:48,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/a4d9ab1f54574a938d9b2928b532491a is 50, key is test_row_0/C:col10/1732724506769/Put/seqid=0 2024-11-27T16:21:48,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742057_1233 (size=12301) 2024-11-27T16:21:48,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:48,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724568512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:48,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:48,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724568520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:48,648 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/a4d9ab1f54574a938d9b2928b532491a 2024-11-27T16:21:48,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/ed7a61b731ee4e98b93622672cdb02aa as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/ed7a61b731ee4e98b93622672cdb02aa 2024-11-27T16:21:48,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/ed7a61b731ee4e98b93622672cdb02aa, entries=150, sequenceid=359, filesize=30.5 K 2024-11-27T16:21:48,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/ddc0db6e1cd0401b9bf17c4bd038eea1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/ddc0db6e1cd0401b9bf17c4bd038eea1 2024-11-27T16:21:48,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/ddc0db6e1cd0401b9bf17c4bd038eea1, entries=150, sequenceid=359, filesize=12.0 K 2024-11-27T16:21:48,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/a4d9ab1f54574a938d9b2928b532491a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/a4d9ab1f54574a938d9b2928b532491a 2024-11-27T16:21:48,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/a4d9ab1f54574a938d9b2928b532491a, entries=150, sequenceid=359, filesize=12.0 K 2024-11-27T16:21:48,669 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 0eeb0588edb2caebe71f8272627a699d in 1286ms, sequenceid=359, compaction requested=true 2024-11-27T16:21:48,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:48,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:48,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:48,669 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:48,669 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:48,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:48,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:48,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:48,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:48,670 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103175 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:48,670 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:48,670 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/A is initiating minor compaction (all files) 2024-11-27T16:21:48,670 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/B is initiating minor compaction (all files) 2024-11-27T16:21:48,670 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/A in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:48,670 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/B in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:48,670 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/05dc970bf774409aaaab085dd0275489, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/d7e5f711e45a4dbd9c32dedb69bf3f15, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/ddc0db6e1cd0401b9bf17c4bd038eea1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=36.7 K 2024-11-27T16:21:48,670 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/638af244aa854b2ca178dfc9ae1e1def, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/841af90e0c344b2a929b5a4fb85b7b4b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/ed7a61b731ee4e98b93622672cdb02aa] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=100.8 K 2024-11-27T16:21:48,671 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:48,671 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/638af244aa854b2ca178dfc9ae1e1def, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/841af90e0c344b2a929b5a4fb85b7b4b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/ed7a61b731ee4e98b93622672cdb02aa] 2024-11-27T16:21:48,671 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 05dc970bf774409aaaab085dd0275489, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732724505477 2024-11-27T16:21:48,671 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 638af244aa854b2ca178dfc9ae1e1def, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732724505477 2024-11-27T16:21:48,671 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d7e5f711e45a4dbd9c32dedb69bf3f15, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732724506101 2024-11-27T16:21:48,672 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 841af90e0c344b2a929b5a4fb85b7b4b, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732724506101 2024-11-27T16:21:48,672 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting ddc0db6e1cd0401b9bf17c4bd038eea1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732724506769 2024-11-27T16:21:48,672 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed7a61b731ee4e98b93622672cdb02aa, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732724506769 2024-11-27T16:21:48,681 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#B#compaction#192 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:48,682 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/efd64eadfc164ea6824b3e112eee7b17 is 50, key is test_row_0/B:col10/1732724506769/Put/seqid=0 2024-11-27T16:21:48,684 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:48,690 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112781d4cea1858c47c1840e6d55bfdeb0bd_0eeb0588edb2caebe71f8272627a699d store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:48,692 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112781d4cea1858c47c1840e6d55bfdeb0bd_0eeb0588edb2caebe71f8272627a699d, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:48,693 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112781d4cea1858c47c1840e6d55bfdeb0bd_0eeb0588edb2caebe71f8272627a699d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:48,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742058_1234 (size=13119) 2024-11-27T16:21:48,701 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/efd64eadfc164ea6824b3e112eee7b17 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/efd64eadfc164ea6824b3e112eee7b17 2024-11-27T16:21:48,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742059_1235 (size=4469) 2024-11-27T16:21:48,706 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/B of 0eeb0588edb2caebe71f8272627a699d into efd64eadfc164ea6824b3e112eee7b17(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:48,706 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:48,706 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/B, priority=13, startTime=1732724508669; duration=0sec 2024-11-27T16:21:48,706 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:48,706 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:B 2024-11-27T16:21:48,706 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:21:48,707 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:21:48,707 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/C is initiating minor compaction (all files) 2024-11-27T16:21:48,707 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/C in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:48,707 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/53e377fd583f4e4b854aae22103161a9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/170861ebad7846fd8993bbbe3f30b1ba, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/a4d9ab1f54574a938d9b2928b532491a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=36.7 K 2024-11-27T16:21:48,708 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 53e377fd583f4e4b854aae22103161a9, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732724505477 2024-11-27T16:21:48,708 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 170861ebad7846fd8993bbbe3f30b1ba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732724506101 2024-11-27T16:21:48,708 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a4d9ab1f54574a938d9b2928b532491a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732724506769 2024-11-27T16:21:48,716 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
0eeb0588edb2caebe71f8272627a699d#C#compaction#194 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:48,717 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/29402663de91442d9284c5a88b212fc8 is 50, key is test_row_0/C:col10/1732724506769/Put/seqid=0 2024-11-27T16:21:48,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742060_1236 (size=13119) 2024-11-27T16:21:48,728 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/29402663de91442d9284c5a88b212fc8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/29402663de91442d9284c5a88b212fc8 2024-11-27T16:21:48,734 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/C of 0eeb0588edb2caebe71f8272627a699d into 29402663de91442d9284c5a88b212fc8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:48,734 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:48,734 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/C, priority=13, startTime=1732724508669; duration=0sec 2024-11-27T16:21:48,734 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:48,734 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:C 2024-11-27T16:21:49,105 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#A#compaction#193 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:21:49,105 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/de4408d568174a3c8202813509689711 is 175, key is test_row_0/A:col10/1732724506769/Put/seqid=0 2024-11-27T16:21:49,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742061_1237 (size=32073) 2024-11-27T16:21:49,149 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/de4408d568174a3c8202813509689711 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/de4408d568174a3c8202813509689711 2024-11-27T16:21:49,160 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/A of 0eeb0588edb2caebe71f8272627a699d into de4408d568174a3c8202813509689711(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:49,160 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:49,160 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/A, priority=13, startTime=1732724508669; duration=0sec 2024-11-27T16:21:49,161 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:49,161 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:A 2024-11-27T16:21:49,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-27T16:21:49,507 INFO [Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-27T16:21:49,508 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:21:49,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-27T16:21:49,510 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:21:49,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=57 2024-11-27T16:21:49,511 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:21:49,511 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:21:49,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:49,517 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:21:49,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:49,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:49,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:49,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:49,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:49,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:49,526 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127ed5a2ed31c9a4978bb9701999760d814_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724507396/Put/seqid=0 2024-11-27T16:21:49,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742062_1238 (size=14994) 2024-11-27T16:21:49,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:49,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724569564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:49,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:49,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724569565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:49,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-27T16:21:49,663 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:49,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-27T16:21:49,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:49,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:49,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:49,665 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:49,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:49,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:49,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:49,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724569668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:49,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:49,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724569670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:49,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-27T16:21:49,817 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:49,817 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-27T16:21:49,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:49,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:49,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:49,818 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:49,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:49,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:49,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:49,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724569872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:49,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:49,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724569872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:49,937 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:49,942 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127ed5a2ed31c9a4978bb9701999760d814_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127ed5a2ed31c9a4978bb9701999760d814_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:49,943 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/4a6a8ee70592406ea8100be6945c048b, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:49,944 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/4a6a8ee70592406ea8100be6945c048b is 175, key is test_row_0/A:col10/1732724507396/Put/seqid=0 2024-11-27T16:21:49,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742063_1239 (size=39949) 2024-11-27T16:21:49,950 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=373, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/4a6a8ee70592406ea8100be6945c048b 2024-11-27T16:21:49,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/b51e3245b6414517963528372d8cb752 is 50, key is test_row_0/B:col10/1732724507396/Put/seqid=0 2024-11-27T16:21:49,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742064_1240 
(size=12301) 2024-11-27T16:21:49,968 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/b51e3245b6414517963528372d8cb752 2024-11-27T16:21:49,970 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:49,971 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-27T16:21:49,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:49,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:49,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:49,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:49,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:49,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:49,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/e89b0bffa2644085854bdd2b981bf24e is 50, key is test_row_0/C:col10/1732724507396/Put/seqid=0 2024-11-27T16:21:49,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742065_1241 (size=12301) 2024-11-27T16:21:50,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-27T16:21:50,124 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:50,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-27T16:21:50,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:50,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:50,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:50,125 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:50,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:50,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:50,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:50,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724570175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:50,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:50,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724570176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:50,278 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:21:50,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-27T16:21:50,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:50,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:50,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:50,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:21:50,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:50,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:21:50,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/e89b0bffa2644085854bdd2b981bf24e 2024-11-27T16:21:50,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/4a6a8ee70592406ea8100be6945c048b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/4a6a8ee70592406ea8100be6945c048b 2024-11-27T16:21:50,401 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/4a6a8ee70592406ea8100be6945c048b, entries=200, sequenceid=373, filesize=39.0 K 2024-11-27T16:21:50,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/b51e3245b6414517963528372d8cb752 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/b51e3245b6414517963528372d8cb752 2024-11-27T16:21:50,406 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/b51e3245b6414517963528372d8cb752, entries=150, sequenceid=373, filesize=12.0 K 2024-11-27T16:21:50,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/e89b0bffa2644085854bdd2b981bf24e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/e89b0bffa2644085854bdd2b981bf24e 2024-11-27T16:21:50,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/e89b0bffa2644085854bdd2b981bf24e, entries=150, sequenceid=373, filesize=12.0 K 2024-11-27T16:21:50,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 0eeb0588edb2caebe71f8272627a699d in 894ms, sequenceid=373, compaction requested=false 2024-11-27T16:21:50,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:50,431 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
7b191dec6496,44169,1732724452967 2024-11-27T16:21:50,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-27T16:21:50,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:50,432 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:21:50,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:50,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:50,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:50,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:50,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:50,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:50,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411273b650cec56d341ad9a0e4eee8312be51_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724509555/Put/seqid=0 2024-11-27T16:21:50,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742066_1242 (size=12454) 2024-11-27T16:21:50,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-27T16:21:50,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:50,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:50,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:50,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724570688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:50,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:50,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724570689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:50,793 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:50,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724570793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:50,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:50,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724570794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:50,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:50,847 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411273b650cec56d341ad9a0e4eee8312be51_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273b650cec56d341ad9a0e4eee8312be51_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:50,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/11bade2381f2468aa21fffe914e1b1bc, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:50,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/11bade2381f2468aa21fffe914e1b1bc is 175, key is test_row_0/A:col10/1732724509555/Put/seqid=0 2024-11-27T16:21:50,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742067_1243 (size=31255) 2024-11-27T16:21:50,854 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=398, memsize=49.2 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/11bade2381f2468aa21fffe914e1b1bc 2024-11-27T16:21:50,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/3bbc626ac8604f3c8babd35aa584a380 is 50, key is test_row_0/B:col10/1732724509555/Put/seqid=0 2024-11-27T16:21:50,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742068_1244 (size=12301) 2024-11-27T16:21:50,870 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/3bbc626ac8604f3c8babd35aa584a380 2024-11-27T16:21:50,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/8f14f80277b4474683fa02053c5a107a is 50, key is test_row_0/C:col10/1732724509555/Put/seqid=0 2024-11-27T16:21:50,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742069_1245 (size=12301) 2024-11-27T16:21:50,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:50,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724570995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:50,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:50,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724570996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:51,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:51,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39228 deadline: 1732724571003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:51,006 DEBUG [Thread-673 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8150 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:21:51,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:51,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39208 deadline: 1732724571026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:51,027 DEBUG [Thread-679 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8169 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:21:51,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:51,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39264 deadline: 1732724571046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:51,047 DEBUG [Thread-677 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8186 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:21:51,286 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/8f14f80277b4474683fa02053c5a107a 2024-11-27T16:21:51,292 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-27T16:21:51,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/11bade2381f2468aa21fffe914e1b1bc as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/11bade2381f2468aa21fffe914e1b1bc 2024-11-27T16:21:51,297 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/11bade2381f2468aa21fffe914e1b1bc, entries=150, sequenceid=398, filesize=30.5 K 2024-11-27T16:21:51,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/3bbc626ac8604f3c8babd35aa584a380 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/3bbc626ac8604f3c8babd35aa584a380 2024-11-27T16:21:51,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:51,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724571299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:51,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:51,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724571300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:51,304 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/3bbc626ac8604f3c8babd35aa584a380, entries=150, sequenceid=398, filesize=12.0 K 2024-11-27T16:21:51,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/8f14f80277b4474683fa02053c5a107a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/8f14f80277b4474683fa02053c5a107a 2024-11-27T16:21:51,313 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/8f14f80277b4474683fa02053c5a107a, entries=150, sequenceid=398, filesize=12.0 K 2024-11-27T16:21:51,314 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 0eeb0588edb2caebe71f8272627a699d in 882ms, sequenceid=398, compaction requested=true 2024-11-27T16:21:51,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:51,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:51,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58
2024-11-27T16:21:51,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=58
2024-11-27T16:21:51,316 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57
2024-11-27T16:21:51,316 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8040 sec
2024-11-27T16:21:51,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.8080 sec
2024-11-27T16:21:51,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57
2024-11-27T16:21:51,616 INFO [Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed
2024-11-27T16:21:51,617 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-27T16:21:51,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees
2024-11-27T16:21:51,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-11-27T16:21:51,619 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-27T16:21:51,619 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-27T16:21:51,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-27T16:21:51,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-11-27T16:21:51,774 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967
2024-11-27T16:21:51,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60
2024-11-27T16:21:51,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.
2024-11-27T16:21:51,775 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:21:51,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:21:51,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:51,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:21:51,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:51,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:21:51,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:21:51,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127349c87c29ae14800981707e8e49a2c8a_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724510687/Put/seqid=0 2024-11-27T16:21:51,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742070_1246 (size=12454) 2024-11-27T16:21:51,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:51,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. as already flushing 2024-11-27T16:21:51,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:51,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724571842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:51,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:51,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724571846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:51,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-27T16:21:51,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:51,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724571947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:51,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:51,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724571948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:52,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:52,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724572151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:52,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:52,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724572151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:52,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:52,195 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127349c87c29ae14800981707e8e49a2c8a_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127349c87c29ae14800981707e8e49a2c8a_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:52,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/08296abe6741479581299082df35350a, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:52,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/08296abe6741479581299082df35350a is 175, key is test_row_0/A:col10/1732724510687/Put/seqid=0 2024-11-27T16:21:52,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742071_1247 (size=31255) 2024-11-27T16:21:52,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-27T16:21:52,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:52,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724572455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:52,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:52,459 DEBUG [Thread-688 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x77b8b9d2 to 127.0.0.1:51088 2024-11-27T16:21:52,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724572455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:52,460 DEBUG [Thread-688 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:21:52,460 DEBUG [Thread-682 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0c645fa1 to 127.0.0.1:51088 2024-11-27T16:21:52,460 DEBUG [Thread-682 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:21:52,462 DEBUG [Thread-686 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a0fc918 to 127.0.0.1:51088 2024-11-27T16:21:52,462 DEBUG [Thread-686 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:21:52,462 DEBUG [Thread-684 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c317ae0 to 127.0.0.1:51088 2024-11-27T16:21:52,462 DEBUG [Thread-684 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:21:52,601 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=409, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/08296abe6741479581299082df35350a 2024-11-27T16:21:52,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/6fd23464b39e4bf9ae0e826c803afdf9 is 50, key is test_row_0/B:col10/1732724510687/Put/seqid=0 2024-11-27T16:21:52,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742072_1248 (size=12301) 2024-11-27T16:21:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-27T16:21:52,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:52,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39180 deadline: 1732724572963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:52,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:21:52,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39168 deadline: 1732724572964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:21:53,015 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/6fd23464b39e4bf9ae0e826c803afdf9 2024-11-27T16:21:53,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/f447ffc661464d159cc23b19a60e29b9 is 50, key is test_row_0/C:col10/1732724510687/Put/seqid=0 2024-11-27T16:21:53,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742073_1249 (size=12301) 2024-11-27T16:21:53,426 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/f447ffc661464d159cc23b19a60e29b9 2024-11-27T16:21:53,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/08296abe6741479581299082df35350a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/08296abe6741479581299082df35350a 2024-11-27T16:21:53,434 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/08296abe6741479581299082df35350a, entries=150, sequenceid=409, filesize=30.5 K 2024-11-27T16:21:53,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/6fd23464b39e4bf9ae0e826c803afdf9 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6fd23464b39e4bf9ae0e826c803afdf9 2024-11-27T16:21:53,438 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6fd23464b39e4bf9ae0e826c803afdf9, entries=150, sequenceid=409, filesize=12.0 K 2024-11-27T16:21:53,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/f447ffc661464d159cc23b19a60e29b9 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/f447ffc661464d159cc23b19a60e29b9 2024-11-27T16:21:53,441 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/f447ffc661464d159cc23b19a60e29b9, entries=150, sequenceid=409, filesize=12.0 K 2024-11-27T16:21:53,442 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 0eeb0588edb2caebe71f8272627a699d in 1667ms, sequenceid=409, compaction requested=true 2024-11-27T16:21:53,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:53,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
2024-11-27T16:21:53,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60
2024-11-27T16:21:53,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=60
2024-11-27T16:21:53,444 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59
2024-11-27T16:21:53,444 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8240 sec
2024-11-27T16:21:53,445 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.8280 sec
2024-11-27T16:21:53,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-11-27T16:21:53,723 INFO [Thread-681 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed
2024-11-27T16:21:53,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 0eeb0588edb2caebe71f8272627a699d
2024-11-27T16:21:53,966 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB
2024-11-27T16:21:53,966 DEBUG [Thread-675 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x40e8ce40 to 127.0.0.1:51088
2024-11-27T16:21:53,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A
2024-11-27T16:21:53,966 DEBUG [Thread-675 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T16:21:53,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:53,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B
2024-11-27T16:21:53,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:53,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C
2024-11-27T16:21:53,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:21:53,971 DEBUG [Thread-671 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a3d7b93 to 127.0.0.1:51088
2024-11-27T16:21:53,971 DEBUG [Thread-671 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T16:21:53,973 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112734df2eb04ce749deb57f8cd8aaa5db9c_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724511835/Put/seqid=0
2024-11-27T16:21:53,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742074_1250 (size=12454)
2024-11-27T16:21:54,380 DEBUG [MemStoreFlusher.0 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:21:54,383 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112734df2eb04ce749deb57f8cd8aaa5db9c_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112734df2eb04ce749deb57f8cd8aaa5db9c_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:21:54,384 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/8c1ec0940fe941d3b2900b9835379ebe, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:54,385 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/8c1ec0940fe941d3b2900b9835379ebe is 175, key is test_row_0/A:col10/1732724511835/Put/seqid=0 2024-11-27T16:21:54,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742075_1251 (size=31255) 2024-11-27T16:21:54,789 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=435, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/8c1ec0940fe941d3b2900b9835379ebe 2024-11-27T16:21:54,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/b6ddad35ae804cbf89b462eeb1b9008c is 50, key is test_row_0/B:col10/1732724511835/Put/seqid=0 2024-11-27T16:21:54,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742076_1252 (size=12301) 2024-11-27T16:21:55,200 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/b6ddad35ae804cbf89b462eeb1b9008c 2024-11-27T16:21:55,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/b012e4b68ab9440181b9fd1404c78d68 is 50, key is test_row_0/C:col10/1732724511835/Put/seqid=0 2024-11-27T16:21:55,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742077_1253 (size=12301) 2024-11-27T16:21:55,335 DEBUG [regionserver/7b191dec6496:0.Chore.1 {}] 
throttle.PressureAwareCompactionThroughputController(103): CompactionPressure is 0.07692307692307693, tune throughput to 53.85 MB/second 2024-11-27T16:21:55,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/b012e4b68ab9440181b9fd1404c78d68 2024-11-27T16:21:55,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/8c1ec0940fe941d3b2900b9835379ebe as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/8c1ec0940fe941d3b2900b9835379ebe 2024-11-27T16:21:55,619 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/8c1ec0940fe941d3b2900b9835379ebe, entries=150, sequenceid=435, filesize=30.5 K 2024-11-27T16:21:55,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/b6ddad35ae804cbf89b462eeb1b9008c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/b6ddad35ae804cbf89b462eeb1b9008c 2024-11-27T16:21:55,623 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/b6ddad35ae804cbf89b462eeb1b9008c, entries=150, sequenceid=435, filesize=12.0 K 2024-11-27T16:21:55,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/b012e4b68ab9440181b9fd1404c78d68 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b012e4b68ab9440181b9fd1404c78d68 2024-11-27T16:21:55,627 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b012e4b68ab9440181b9fd1404c78d68, entries=150, sequenceid=435, filesize=12.0 K 2024-11-27T16:21:55,628 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=6.71 KB/6870 for 0eeb0588edb2caebe71f8272627a699d in 1662ms, sequenceid=435, compaction requested=true 2024-11-27T16:21:55,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:55,628 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:21:55,628 
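The throughput tuning reported above (compaction pressure ≈ 0.0769, tuned to 53.85 MB/second) is consistent with a linear interpolation between the controller's lower and higher throughput bounds at their default values of 50 MB/s and 100 MB/s. Treating those defaults as an assumption about this test cluster, the arithmetic checks out:

    public class CompactionThroughputCheck {
      public static void main(String[] args) {
        // Assumption: hbase.hstore.compaction.throughput.lower.bound / .higher.bound are left at
        // their defaults of 50 MB/s and 100 MB/s on this cluster.
        double lower = 50.0;                   // MB/second
        double higher = 100.0;                 // MB/second
        double pressure = 0.07692307692307693; // compaction pressure from the log line above
        double tuned = lower + (higher - lower) * pressure;
        System.out.printf("tune throughput to %.2f MB/second%n", tuned); // prints 53.85, matching the log
      }
    }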
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:55,628 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T16:21:55,628 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:21:55,628 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:55,628 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0eeb0588edb2caebe71f8272627a699d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:21:55,628 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:55,628 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T16:21:55,629 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 165787 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T16:21:55,629 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62323 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T16:21:55,629 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/B is initiating minor compaction (all files) 2024-11-27T16:21:55,629 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/A is initiating minor compaction (all files) 2024-11-27T16:21:55,630 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/B in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:55,630 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/A in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
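The selection figures above ("5 store files, 0 compacting, 5 eligible, 16 blocking") line up with the stock compaction settings, in particular the default blocking-store-files limit of 16. A quick way to confirm what a cluster is actually running with is to read the keys back from the client Configuration; the defaults in the comments are only an assumption about this test setup:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSettings {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Shipped defaults: min=3, max=10, blockingStoreFiles=16 (the "16 blocking" above).
        System.out.println("hbase.hstore.compaction.min     = " + conf.getInt("hbase.hstore.compaction.min", 3));
        System.out.println("hbase.hstore.compaction.max     = " + conf.getInt("hbase.hstore.compaction.max", 10));
        System.out.println("hbase.hstore.blockingStoreFiles = " + conf.getInt("hbase.hstore.blockingStoreFiles", 16));
      }
    }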
2024-11-27T16:21:55,630 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/efd64eadfc164ea6824b3e112eee7b17, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/b51e3245b6414517963528372d8cb752, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/3bbc626ac8604f3c8babd35aa584a380, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6fd23464b39e4bf9ae0e826c803afdf9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/b6ddad35ae804cbf89b462eeb1b9008c] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=60.9 K 2024-11-27T16:21:55,630 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/de4408d568174a3c8202813509689711, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/4a6a8ee70592406ea8100be6945c048b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/11bade2381f2468aa21fffe914e1b1bc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/08296abe6741479581299082df35350a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/8c1ec0940fe941d3b2900b9835379ebe] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=161.9 K 2024-11-27T16:21:55,630 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:55,630 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/de4408d568174a3c8202813509689711, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/4a6a8ee70592406ea8100be6945c048b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/11bade2381f2468aa21fffe914e1b1bc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/08296abe6741479581299082df35350a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/8c1ec0940fe941d3b2900b9835379ebe] 2024-11-27T16:21:55,630 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting efd64eadfc164ea6824b3e112eee7b17, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732724506769 2024-11-27T16:21:55,630 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting de4408d568174a3c8202813509689711, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732724506769 2024-11-27T16:21:55,630 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a6a8ee70592406ea8100be6945c048b, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732724507388 2024-11-27T16:21:55,630 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b51e3245b6414517963528372d8cb752, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732724507390 2024-11-27T16:21:55,631 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11bade2381f2468aa21fffe914e1b1bc, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1732724509555 2024-11-27T16:21:55,631 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bbc626ac8604f3c8babd35aa584a380, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1732724509555 2024-11-27T16:21:55,631 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08296abe6741479581299082df35350a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732724510684 2024-11-27T16:21:55,631 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fd23464b39e4bf9ae0e826c803afdf9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732724510684 2024-11-27T16:21:55,631 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c1ec0940fe941d3b2900b9835379ebe, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732724511835 2024-11-27T16:21:55,631 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b6ddad35ae804cbf89b462eeb1b9008c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, 
earliestPutTs=1732724511835 2024-11-27T16:21:55,642 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#B#compaction#207 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:21:55,643 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/6e33bdd6574c4bd3aa3d16032487da6a is 50, key is test_row_0/B:col10/1732724511835/Put/seqid=0 2024-11-27T16:21:55,647 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:55,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742078_1254 (size=13289) 2024-11-27T16:21:55,649 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112776ec5c52dcde41bfbd81edaed4df9681_0eeb0588edb2caebe71f8272627a699d store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:55,675 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112776ec5c52dcde41bfbd81edaed4df9681_0eeb0588edb2caebe71f8272627a699d, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:55,675 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112776ec5c52dcde41bfbd81edaed4df9681_0eeb0588edb2caebe71f8272627a699d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:21:55,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742079_1255 (size=4469) 2024-11-27T16:21:56,052 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/6e33bdd6574c4bd3aa3d16032487da6a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6e33bdd6574c4bd3aa3d16032487da6a 2024-11-27T16:21:56,057 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/B of 0eeb0588edb2caebe71f8272627a699d into 6e33bdd6574c4bd3aa3d16032487da6a(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:21:56,057 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:56,057 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/B, priority=11, startTime=1732724515628; duration=0sec 2024-11-27T16:21:56,057 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:21:56,057 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:B 2024-11-27T16:21:56,057 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T16:21:56,058 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62323 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T16:21:56,058 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 0eeb0588edb2caebe71f8272627a699d/C is initiating minor compaction (all files) 2024-11-27T16:21:56,058 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0eeb0588edb2caebe71f8272627a699d/C in TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:21:56,059 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/29402663de91442d9284c5a88b212fc8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/e89b0bffa2644085854bdd2b981bf24e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/8f14f80277b4474683fa02053c5a107a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/f447ffc661464d159cc23b19a60e29b9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b012e4b68ab9440181b9fd1404c78d68] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp, totalSize=60.9 K 2024-11-27T16:21:56,059 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 29402663de91442d9284c5a88b212fc8, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732724506769 2024-11-27T16:21:56,059 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting e89b0bffa2644085854bdd2b981bf24e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732724507390 2024-11-27T16:21:56,059 DEBUG 
[RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f14f80277b4474683fa02053c5a107a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1732724509555 2024-11-27T16:21:56,060 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f447ffc661464d159cc23b19a60e29b9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732724510684 2024-11-27T16:21:56,060 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b012e4b68ab9440181b9fd1404c78d68, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732724511835 2024-11-27T16:21:56,070 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#C#compaction#209 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:21:56,071 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/d938448a6a2a422a8ea8afba3288a0eb is 50, key is test_row_0/C:col10/1732724511835/Put/seqid=0 2024-11-27T16:21:56,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742080_1256 (size=13289) 2024-11-27T16:21:56,080 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0eeb0588edb2caebe71f8272627a699d#A#compaction#208 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:21:56,081 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/a54efd5a9bd24637bf2686e9a26c9f11 is 175, key is test_row_0/A:col10/1732724511835/Put/seqid=0 2024-11-27T16:21:56,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742081_1257 (size=32243) 2024-11-27T16:21:56,309 DEBUG [master/7b191dec6496:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region fb56c9d94acc1b64bf2472d65ab81174 changed from -1.0 to 0.0, refreshing cache 2024-11-27T16:21:56,480 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/d938448a6a2a422a8ea8afba3288a0eb as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/d938448a6a2a422a8ea8afba3288a0eb 2024-11-27T16:21:56,485 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/C of 0eeb0588edb2caebe71f8272627a699d into d938448a6a2a422a8ea8afba3288a0eb(size=13.0 K), total size for store is 13.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:56,485 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:56,485 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/C, priority=11, startTime=1732724515628; duration=0sec 2024-11-27T16:21:56,486 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:56,486 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:C 2024-11-27T16:21:56,490 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/a54efd5a9bd24637bf2686e9a26c9f11 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/a54efd5a9bd24637bf2686e9a26c9f11 2024-11-27T16:21:56,494 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 0eeb0588edb2caebe71f8272627a699d/A of 0eeb0588edb2caebe71f8272627a699d into a54efd5a9bd24637bf2686e9a26c9f11(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:21:56,494 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:21:56,494 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d., storeName=0eeb0588edb2caebe71f8272627a699d/A, priority=11, startTime=1732724515628; duration=0sec 2024-11-27T16:21:56,495 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:21:56,495 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0eeb0588edb2caebe71f8272627a699d:A 2024-11-27T16:22:01,093 DEBUG [Thread-679 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2362c8ba to 127.0.0.1:51088 2024-11-27T16:22:01,093 DEBUG [Thread-679 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:01,094 DEBUG [Thread-673 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ad21927 to 127.0.0.1:51088 2024-11-27T16:22:01,094 DEBUG [Thread-673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:01,130 DEBUG [Thread-677 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2939e0db to 127.0.0.1:51088 2024-11-27T16:22:01,130 DEBUG [Thread-677 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:01,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-27T16:22:01,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 134 2024-11-27T16:22:01,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 35 2024-11-27T16:22:01,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 85 2024-11-27T16:22:01,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 23 2024-11-27T16:22:01,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 71 2024-11-27T16:22:01,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-27T16:22:01,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5907 2024-11-27T16:22:01,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6082 2024-11-27T16:22:01,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-27T16:22:01,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2627 2024-11-27T16:22:01,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7881 rows 2024-11-27T16:22:01,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2608 2024-11-27T16:22:01,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7824 rows 2024-11-27T16:22:01,131 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-27T16:22:01,131 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x415dec94 to 127.0.0.1:51088 2024-11-27T16:22:01,131 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:01,134 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-27T16:22:01,134 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-27T16:22:01,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:01,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-27T16:22:01,138 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724521138"}]},"ts":"1732724521138"} 2024-11-27T16:22:01,139 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-27T16:22:01,141 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-27T16:22:01,142 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T16:22:01,143 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0eeb0588edb2caebe71f8272627a699d, UNASSIGN}] 2024-11-27T16:22:01,143 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=0eeb0588edb2caebe71f8272627a699d, UNASSIGN 2024-11-27T16:22:01,144 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=0eeb0588edb2caebe71f8272627a699d, regionState=CLOSING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:22:01,145 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T16:22:01,145 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; CloseRegionProcedure 0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:22:01,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-27T16:22:01,296 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:01,296 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(124): Close 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:01,296 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T16:22:01,297 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1681): Closing 0eeb0588edb2caebe71f8272627a699d, disabling compactions & flushes 2024-11-27T16:22:01,297 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:22:01,297 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:22:01,297 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. after waiting 0 ms 2024-11-27T16:22:01,297 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 
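The disable of TestAcidGuarantees above (pid=61) and the region unassign/close it schedules are driven by a blocking client call; a minimal sketch, again assuming classpath configuration and an illustrative class name:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTestTable {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          if (!admin.isTableDisabled(table)) {
            // Blocks while the master runs the DisableTableProcedure; the recurring
            // "Checking to see if procedure is done pid=61" entries are that polling.
            admin.disableTable(table);
          }
        }
      }
    }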
2024-11-27T16:22:01,297 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(2837): Flushing 0eeb0588edb2caebe71f8272627a699d 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-27T16:22:01,297 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=A 2024-11-27T16:22:01,297 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:01,297 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=B 2024-11-27T16:22:01,297 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:01,297 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0eeb0588edb2caebe71f8272627a699d, store=C 2024-11-27T16:22:01,297 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:01,303 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127cb4cfdfdb3cc4426808f0e7ffa337fa6_0eeb0588edb2caebe71f8272627a699d is 50, key is test_row_0/A:col10/1732724521129/Put/seqid=0 2024-11-27T16:22:01,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742082_1258 (size=9914) 2024-11-27T16:22:01,308 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:01,312 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127cb4cfdfdb3cc4426808f0e7ffa337fa6_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127cb4cfdfdb3cc4426808f0e7ffa337fa6_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:01,313 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/aa4b7e2feaeb4b2ca9e6f5aba72ef017, store: [table=TestAcidGuarantees family=A region=0eeb0588edb2caebe71f8272627a699d] 2024-11-27T16:22:01,314 DEBUG 
[RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/aa4b7e2feaeb4b2ca9e6f5aba72ef017 is 175, key is test_row_0/A:col10/1732724521129/Put/seqid=0 2024-11-27T16:22:01,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742083_1259 (size=22561) 2024-11-27T16:22:01,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-27T16:22:01,719 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=445, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/aa4b7e2feaeb4b2ca9e6f5aba72ef017 2024-11-27T16:22:01,725 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/7b242b63eab24a878f96e465a896456b is 50, key is test_row_0/B:col10/1732724521129/Put/seqid=0 2024-11-27T16:22:01,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742084_1260 (size=9857) 2024-11-27T16:22:01,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-27T16:22:02,130 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/7b242b63eab24a878f96e465a896456b 2024-11-27T16:22:02,137 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/fe683ea14bcf4c77ad51fe200f40bfb6 is 50, key is test_row_0/C:col10/1732724521129/Put/seqid=0 2024-11-27T16:22:02,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742085_1261 (size=9857) 2024-11-27T16:22:02,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-27T16:22:02,541 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/fe683ea14bcf4c77ad51fe200f40bfb6 2024-11-27T16:22:02,547 DEBUG 
[RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/A/aa4b7e2feaeb4b2ca9e6f5aba72ef017 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/aa4b7e2feaeb4b2ca9e6f5aba72ef017 2024-11-27T16:22:02,552 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/aa4b7e2feaeb4b2ca9e6f5aba72ef017, entries=100, sequenceid=445, filesize=22.0 K 2024-11-27T16:22:02,553 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/B/7b242b63eab24a878f96e465a896456b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/7b242b63eab24a878f96e465a896456b 2024-11-27T16:22:02,557 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/7b242b63eab24a878f96e465a896456b, entries=100, sequenceid=445, filesize=9.6 K 2024-11-27T16:22:02,558 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/.tmp/C/fe683ea14bcf4c77ad51fe200f40bfb6 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/fe683ea14bcf4c77ad51fe200f40bfb6 2024-11-27T16:22:02,562 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/fe683ea14bcf4c77ad51fe200f40bfb6, entries=100, sequenceid=445, filesize=9.6 K 2024-11-27T16:22:02,563 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 0eeb0588edb2caebe71f8272627a699d in 1266ms, sequenceid=445, compaction requested=false 2024-11-27T16:22:02,569 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/002f374727ed434abad4fdc81c620220, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/3407a8a656b8441e85075602cd8a4aae, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/dd3d70498f42464987b8c669c59f8326, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/4507d6363c254a5f93ed29182618c1b4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/7c3574c93c434940b7b3cca27a2bfc82, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/358b3a5864a64b51b2b2faf90d1e6aa7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/84dedb33735242dd92d918c6d26bd5fc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/f42a0578bdb94064af7aef481f16fec2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/9f3d3d6ed6554879b8f68e8e29589b69, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/7c75de37a69944dd9ce66c448466f510, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/e75689ce70cd4c2282cd09bc44c6da67, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/a1bf1757dd28405ca76c7e9b82e8323f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/1f3dea9937904ab19b12c70974b9b4c3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/8fb84c3a6a23446395885a2df475f597, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/82022cf451fb4521b018879e91abafd8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/1f6fc541afdc4655b4f1ca4fb7eb6d65, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/47028818db9d4134be8c4aeb48be71b0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/c6923370c87145e9965cff9758c379bc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/c87ad64e65644fffb7eda39027faf9e5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/6dcd6eeea4514af79590dbcbe02fe48b, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/f67fceacab1843f190a2254649f809aa, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/638af244aa854b2ca178dfc9ae1e1def, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/841af90e0c344b2a929b5a4fb85b7b4b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/de4408d568174a3c8202813509689711, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/ed7a61b731ee4e98b93622672cdb02aa, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/4a6a8ee70592406ea8100be6945c048b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/11bade2381f2468aa21fffe914e1b1bc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/08296abe6741479581299082df35350a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/8c1ec0940fe941d3b2900b9835379ebe] to archive 2024-11-27T16:22:02,571 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
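Each file archived in the entries that follow keeps its table/region/family-relative layout; only the root switches from data/ to archive/data/. The helper below is hypothetical (not an HBase API) and only illustrates that mapping for paths shaped like the ones in this log:

    public class ArchivePathExample {
      // Hypothetical helper, not part of HBase: rewrite .../data/<ns>/... to .../archive/data/<ns>/...
      static String toArchivePath(String storeFilePath) {
        return storeFilePath.replaceFirst("/data/default/", "/archive/data/default/");
      }

      public static void main(String[] args) {
        String src = "hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/"
            + "data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/002f374727ed434abad4fdc81c620220";
        System.out.println(toArchivePath(src)); // matches the archive destination logged just below
      }
    }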
2024-11-27T16:22:02,573 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/002f374727ed434abad4fdc81c620220 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/002f374727ed434abad4fdc81c620220 2024-11-27T16:22:02,575 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/3407a8a656b8441e85075602cd8a4aae to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/3407a8a656b8441e85075602cd8a4aae 2024-11-27T16:22:02,580 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/dd3d70498f42464987b8c669c59f8326 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/dd3d70498f42464987b8c669c59f8326 2024-11-27T16:22:02,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/4507d6363c254a5f93ed29182618c1b4 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/4507d6363c254a5f93ed29182618c1b4 2024-11-27T16:22:02,583 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/7c3574c93c434940b7b3cca27a2bfc82 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/7c3574c93c434940b7b3cca27a2bfc82 2024-11-27T16:22:02,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/358b3a5864a64b51b2b2faf90d1e6aa7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/358b3a5864a64b51b2b2faf90d1e6aa7 2024-11-27T16:22:02,588 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/84dedb33735242dd92d918c6d26bd5fc to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/84dedb33735242dd92d918c6d26bd5fc 2024-11-27T16:22:02,590 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/f42a0578bdb94064af7aef481f16fec2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/f42a0578bdb94064af7aef481f16fec2 2024-11-27T16:22:02,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/9f3d3d6ed6554879b8f68e8e29589b69 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/9f3d3d6ed6554879b8f68e8e29589b69 2024-11-27T16:22:02,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/7c75de37a69944dd9ce66c448466f510 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/7c75de37a69944dd9ce66c448466f510 2024-11-27T16:22:02,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/e75689ce70cd4c2282cd09bc44c6da67 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/e75689ce70cd4c2282cd09bc44c6da67 2024-11-27T16:22:02,600 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/a1bf1757dd28405ca76c7e9b82e8323f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/a1bf1757dd28405ca76c7e9b82e8323f 2024-11-27T16:22:02,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/1f3dea9937904ab19b12c70974b9b4c3 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/1f3dea9937904ab19b12c70974b9b4c3 2024-11-27T16:22:02,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/8fb84c3a6a23446395885a2df475f597 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/8fb84c3a6a23446395885a2df475f597 2024-11-27T16:22:02,605 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/82022cf451fb4521b018879e91abafd8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/82022cf451fb4521b018879e91abafd8 2024-11-27T16:22:02,607 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/1f6fc541afdc4655b4f1ca4fb7eb6d65 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/1f6fc541afdc4655b4f1ca4fb7eb6d65 2024-11-27T16:22:02,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/47028818db9d4134be8c4aeb48be71b0 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/47028818db9d4134be8c4aeb48be71b0 2024-11-27T16:22:02,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/c6923370c87145e9965cff9758c379bc to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/c6923370c87145e9965cff9758c379bc 2024-11-27T16:22:02,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/c87ad64e65644fffb7eda39027faf9e5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/c87ad64e65644fffb7eda39027faf9e5 2024-11-27T16:22:02,615 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/6dcd6eeea4514af79590dbcbe02fe48b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/6dcd6eeea4514af79590dbcbe02fe48b 2024-11-27T16:22:02,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/f67fceacab1843f190a2254649f809aa to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/f67fceacab1843f190a2254649f809aa 2024-11-27T16:22:02,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/638af244aa854b2ca178dfc9ae1e1def to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/638af244aa854b2ca178dfc9ae1e1def 2024-11-27T16:22:02,620 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/841af90e0c344b2a929b5a4fb85b7b4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/841af90e0c344b2a929b5a4fb85b7b4b 2024-11-27T16:22:02,621 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/de4408d568174a3c8202813509689711 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/de4408d568174a3c8202813509689711 2024-11-27T16:22:02,624 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/ed7a61b731ee4e98b93622672cdb02aa to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/ed7a61b731ee4e98b93622672cdb02aa 2024-11-27T16:22:02,627 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/4a6a8ee70592406ea8100be6945c048b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/4a6a8ee70592406ea8100be6945c048b 2024-11-27T16:22:02,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/11bade2381f2468aa21fffe914e1b1bc to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/11bade2381f2468aa21fffe914e1b1bc 2024-11-27T16:22:02,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/08296abe6741479581299082df35350a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/08296abe6741479581299082df35350a 2024-11-27T16:22:02,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/8c1ec0940fe941d3b2900b9835379ebe to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/8c1ec0940fe941d3b2900b9835379ebe 2024-11-27T16:22:02,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/55f562718c5948c092bda235c76c99b1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/c6aad0e13f824890a3e7d2b54cb5ad65, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/5b9a2f2abdd54afd823d7f8bda8f6a21, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/daae7746fe5042378d4b9c12dc826fcd, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/01ae8ba93d15406486239b0220cd75a4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a60af2ae54684ba186a550755638f4d4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a4e282ab441f485daffbc548800448dc, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a769f4e1bde642148f5529cc773eeb73, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/1e84bcbb96d2497996a80ee638baa3f2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/44caf2ca27f940b9a772be903e5103cb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/50776388d75b4536a2cffefa7246c55a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/8bb5ad4c995e4ef68e0ba00c226b293e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/75bc4636c36f4e46a68fa4a815796495, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/49bc75baae1d4671b939a8ff7d58e5bd, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/50bdb54d4a7a484b9b11f2705d67f7a0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/0d24befe3d2b4a1fb058b0599d7aa4a8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/11bd5b71690c4ec69e8dba809fde796b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/3963b05c221e4790a12833cddfb5f1c2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6ce4e5d80b5e4d29af72bb9bccad87fb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/dcd75ebf9b234721b7b7b22e926a73cf, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/05dc970bf774409aaaab085dd0275489, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/66a4b15e90f74765b46ce1d63282ce44, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/d7e5f711e45a4dbd9c32dedb69bf3f15, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/efd64eadfc164ea6824b3e112eee7b17, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/ddc0db6e1cd0401b9bf17c4bd038eea1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/b51e3245b6414517963528372d8cb752, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/3bbc626ac8604f3c8babd35aa584a380, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6fd23464b39e4bf9ae0e826c803afdf9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/b6ddad35ae804cbf89b462eeb1b9008c] to archive 2024-11-27T16:22:02,634 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T16:22:02,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/55f562718c5948c092bda235c76c99b1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/55f562718c5948c092bda235c76c99b1 2024-11-27T16:22:02,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/c6aad0e13f824890a3e7d2b54cb5ad65 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/c6aad0e13f824890a3e7d2b54cb5ad65 2024-11-27T16:22:02,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/5b9a2f2abdd54afd823d7f8bda8f6a21 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/5b9a2f2abdd54afd823d7f8bda8f6a21 2024-11-27T16:22:02,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/daae7746fe5042378d4b9c12dc826fcd to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/daae7746fe5042378d4b9c12dc826fcd 2024-11-27T16:22:02,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/01ae8ba93d15406486239b0220cd75a4 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/01ae8ba93d15406486239b0220cd75a4 2024-11-27T16:22:02,644 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a60af2ae54684ba186a550755638f4d4 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a60af2ae54684ba186a550755638f4d4 2024-11-27T16:22:02,645 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a4e282ab441f485daffbc548800448dc to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a4e282ab441f485daffbc548800448dc 2024-11-27T16:22:02,646 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a769f4e1bde642148f5529cc773eeb73 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/a769f4e1bde642148f5529cc773eeb73 2024-11-27T16:22:02,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/1e84bcbb96d2497996a80ee638baa3f2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/1e84bcbb96d2497996a80ee638baa3f2 2024-11-27T16:22:02,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/44caf2ca27f940b9a772be903e5103cb to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/44caf2ca27f940b9a772be903e5103cb 2024-11-27T16:22:02,651 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/50776388d75b4536a2cffefa7246c55a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/50776388d75b4536a2cffefa7246c55a 2024-11-27T16:22:02,653 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/8bb5ad4c995e4ef68e0ba00c226b293e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/8bb5ad4c995e4ef68e0ba00c226b293e 2024-11-27T16:22:02,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/75bc4636c36f4e46a68fa4a815796495 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/75bc4636c36f4e46a68fa4a815796495 2024-11-27T16:22:02,655 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/49bc75baae1d4671b939a8ff7d58e5bd to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/49bc75baae1d4671b939a8ff7d58e5bd 2024-11-27T16:22:02,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/50bdb54d4a7a484b9b11f2705d67f7a0 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/50bdb54d4a7a484b9b11f2705d67f7a0 2024-11-27T16:22:02,658 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/0d24befe3d2b4a1fb058b0599d7aa4a8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/0d24befe3d2b4a1fb058b0599d7aa4a8 2024-11-27T16:22:02,660 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/11bd5b71690c4ec69e8dba809fde796b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/11bd5b71690c4ec69e8dba809fde796b 2024-11-27T16:22:02,661 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/3963b05c221e4790a12833cddfb5f1c2 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/3963b05c221e4790a12833cddfb5f1c2 2024-11-27T16:22:02,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6ce4e5d80b5e4d29af72bb9bccad87fb to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6ce4e5d80b5e4d29af72bb9bccad87fb 2024-11-27T16:22:02,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/dcd75ebf9b234721b7b7b22e926a73cf to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/dcd75ebf9b234721b7b7b22e926a73cf 2024-11-27T16:22:02,666 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/05dc970bf774409aaaab085dd0275489 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/05dc970bf774409aaaab085dd0275489 2024-11-27T16:22:02,667 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/66a4b15e90f74765b46ce1d63282ce44 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/66a4b15e90f74765b46ce1d63282ce44 2024-11-27T16:22:02,669 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/d7e5f711e45a4dbd9c32dedb69bf3f15 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/d7e5f711e45a4dbd9c32dedb69bf3f15 2024-11-27T16:22:02,670 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/efd64eadfc164ea6824b3e112eee7b17 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/efd64eadfc164ea6824b3e112eee7b17 2024-11-27T16:22:02,672 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/ddc0db6e1cd0401b9bf17c4bd038eea1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/ddc0db6e1cd0401b9bf17c4bd038eea1 2024-11-27T16:22:02,674 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/b51e3245b6414517963528372d8cb752 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/b51e3245b6414517963528372d8cb752 2024-11-27T16:22:02,676 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/3bbc626ac8604f3c8babd35aa584a380 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/3bbc626ac8604f3c8babd35aa584a380 2024-11-27T16:22:02,677 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6fd23464b39e4bf9ae0e826c803afdf9 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6fd23464b39e4bf9ae0e826c803afdf9 2024-11-27T16:22:02,679 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/b6ddad35ae804cbf89b462eeb1b9008c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/b6ddad35ae804cbf89b462eeb1b9008c 2024-11-27T16:22:02,681 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/57fa462da93141e1931551e7f00628e8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/ffdb929f95d34f5bbc3e4310191d39c4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/0783c9d1842a40d097e999bb82466084, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/ae6825c059f048588118b31bdde03149, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/7facee1695d94554bb83f94a730d0a24, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/faca2b94d10f40ccb5602dffe3721f3d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/c15705e064e546e08cccba7c8051a29c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/31c8f9ae1b2741c686a1d565938751b1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/41d255ff308440f1b753295c647127dc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/37c6d412b5744deda1f0bbc23e6fb74a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/432dd1347b4c46a1b11de9e597ea27c1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/5e44303fe9a744e9ac247fbd24a6f52c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/39cf88e556754f7cbeb810db3b97d36e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b7bd01a0b3fd44ef901efe6966a78353, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/7fa3cb27374e45a1af8e3e522cec87d1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b4bff3ba2a4d42fab6d83be557eb45e3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/42f1c5395a2548e6b73a11d4c7663cbe, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/29da542e398046f5b7d5375b9d2fdfae, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/6c4a37c215b44ba29e4154f48ecd435b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/f4550b3123ee4c7db257d066c11bc4a9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/53e377fd583f4e4b854aae22103161a9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/4b7506b8c18d4d08b5a29393adfe432f, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/170861ebad7846fd8993bbbe3f30b1ba, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/29402663de91442d9284c5a88b212fc8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/a4d9ab1f54574a938d9b2928b532491a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/e89b0bffa2644085854bdd2b981bf24e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/8f14f80277b4474683fa02053c5a107a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/f447ffc661464d159cc23b19a60e29b9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b012e4b68ab9440181b9fd1404c78d68] to archive 2024-11-27T16:22:02,682 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T16:22:02,685 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/57fa462da93141e1931551e7f00628e8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/57fa462da93141e1931551e7f00628e8 2024-11-27T16:22:02,686 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/ffdb929f95d34f5bbc3e4310191d39c4 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/ffdb929f95d34f5bbc3e4310191d39c4 2024-11-27T16:22:02,688 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/0783c9d1842a40d097e999bb82466084 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/0783c9d1842a40d097e999bb82466084 2024-11-27T16:22:02,689 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/ae6825c059f048588118b31bdde03149 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/ae6825c059f048588118b31bdde03149 2024-11-27T16:22:02,692 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/7facee1695d94554bb83f94a730d0a24 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/7facee1695d94554bb83f94a730d0a24 2024-11-27T16:22:02,694 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/faca2b94d10f40ccb5602dffe3721f3d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/faca2b94d10f40ccb5602dffe3721f3d 2024-11-27T16:22:02,695 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/c15705e064e546e08cccba7c8051a29c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/c15705e064e546e08cccba7c8051a29c 2024-11-27T16:22:02,696 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/31c8f9ae1b2741c686a1d565938751b1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/31c8f9ae1b2741c686a1d565938751b1 2024-11-27T16:22:02,697 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/41d255ff308440f1b753295c647127dc to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/41d255ff308440f1b753295c647127dc 2024-11-27T16:22:02,699 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/37c6d412b5744deda1f0bbc23e6fb74a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/37c6d412b5744deda1f0bbc23e6fb74a 2024-11-27T16:22:02,700 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/432dd1347b4c46a1b11de9e597ea27c1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/432dd1347b4c46a1b11de9e597ea27c1 2024-11-27T16:22:02,701 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/5e44303fe9a744e9ac247fbd24a6f52c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/5e44303fe9a744e9ac247fbd24a6f52c 2024-11-27T16:22:02,702 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/39cf88e556754f7cbeb810db3b97d36e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/39cf88e556754f7cbeb810db3b97d36e 2024-11-27T16:22:02,704 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b7bd01a0b3fd44ef901efe6966a78353 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b7bd01a0b3fd44ef901efe6966a78353 2024-11-27T16:22:02,705 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/7fa3cb27374e45a1af8e3e522cec87d1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/7fa3cb27374e45a1af8e3e522cec87d1 2024-11-27T16:22:02,706 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b4bff3ba2a4d42fab6d83be557eb45e3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b4bff3ba2a4d42fab6d83be557eb45e3 2024-11-27T16:22:02,707 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/42f1c5395a2548e6b73a11d4c7663cbe to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/42f1c5395a2548e6b73a11d4c7663cbe 2024-11-27T16:22:02,709 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/29da542e398046f5b7d5375b9d2fdfae to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/29da542e398046f5b7d5375b9d2fdfae 2024-11-27T16:22:02,710 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/6c4a37c215b44ba29e4154f48ecd435b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/6c4a37c215b44ba29e4154f48ecd435b 2024-11-27T16:22:02,712 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/f4550b3123ee4c7db257d066c11bc4a9 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/f4550b3123ee4c7db257d066c11bc4a9 2024-11-27T16:22:02,713 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/53e377fd583f4e4b854aae22103161a9 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/53e377fd583f4e4b854aae22103161a9 2024-11-27T16:22:02,715 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/4b7506b8c18d4d08b5a29393adfe432f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/4b7506b8c18d4d08b5a29393adfe432f 2024-11-27T16:22:02,717 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/170861ebad7846fd8993bbbe3f30b1ba to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/170861ebad7846fd8993bbbe3f30b1ba 2024-11-27T16:22:02,718 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/29402663de91442d9284c5a88b212fc8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/29402663de91442d9284c5a88b212fc8 2024-11-27T16:22:02,720 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/a4d9ab1f54574a938d9b2928b532491a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/a4d9ab1f54574a938d9b2928b532491a 2024-11-27T16:22:02,721 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/e89b0bffa2644085854bdd2b981bf24e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/e89b0bffa2644085854bdd2b981bf24e 2024-11-27T16:22:02,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/8f14f80277b4474683fa02053c5a107a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/8f14f80277b4474683fa02053c5a107a 2024-11-27T16:22:02,725 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/f447ffc661464d159cc23b19a60e29b9 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/f447ffc661464d159cc23b19a60e29b9 2024-11-27T16:22:02,726 DEBUG [StoreCloser-TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b012e4b68ab9440181b9fd1404c78d68 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/b012e4b68ab9440181b9fd1404c78d68 2024-11-27T16:22:02,739 DEBUG 
[RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/recovered.edits/448.seqid, newMaxSeqId=448, maxSeqId=4 2024-11-27T16:22:02,740 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d. 2024-11-27T16:22:02,740 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1635): Region close journal for 0eeb0588edb2caebe71f8272627a699d: 2024-11-27T16:22:02,742 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(170): Closed 0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:02,742 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=0eeb0588edb2caebe71f8272627a699d, regionState=CLOSED 2024-11-27T16:22:02,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-27T16:22:02,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseRegionProcedure 0eeb0588edb2caebe71f8272627a699d, server=7b191dec6496,44169,1732724452967 in 1.5980 sec 2024-11-27T16:22:02,747 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=63, resume processing ppid=62 2024-11-27T16:22:02,747 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, ppid=62, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0eeb0588edb2caebe71f8272627a699d, UNASSIGN in 1.6020 sec 2024-11-27T16:22:02,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-27T16:22:02,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6050 sec 2024-11-27T16:22:02,750 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724522750"}]},"ts":"1732724522750"} 2024-11-27T16:22:02,751 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-27T16:22:02,754 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-27T16:22:02,758 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6210 sec 2024-11-27T16:22:03,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-27T16:22:03,242 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-27T16:22:03,242 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-27T16:22:03,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:03,244 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=65, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:03,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-27T16:22:03,245 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=65, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:03,247 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,249 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/recovered.edits] 2024-11-27T16:22:03,253 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/a54efd5a9bd24637bf2686e9a26c9f11 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/a54efd5a9bd24637bf2686e9a26c9f11 2024-11-27T16:22:03,254 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/aa4b7e2feaeb4b2ca9e6f5aba72ef017 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/A/aa4b7e2feaeb4b2ca9e6f5aba72ef017 2024-11-27T16:22:03,257 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6e33bdd6574c4bd3aa3d16032487da6a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/6e33bdd6574c4bd3aa3d16032487da6a 2024-11-27T16:22:03,258 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/7b242b63eab24a878f96e465a896456b to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/B/7b242b63eab24a878f96e465a896456b 2024-11-27T16:22:03,261 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/d938448a6a2a422a8ea8afba3288a0eb to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/d938448a6a2a422a8ea8afba3288a0eb 2024-11-27T16:22:03,262 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/fe683ea14bcf4c77ad51fe200f40bfb6 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/C/fe683ea14bcf4c77ad51fe200f40bfb6 2024-11-27T16:22:03,266 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/recovered.edits/448.seqid to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d/recovered.edits/448.seqid 2024-11-27T16:22:03,267 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,267 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-27T16:22:03,267 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-27T16:22:03,268 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-27T16:22:03,274 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270a21df611b4143689e156aa42f16e5b3_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270a21df611b4143689e156aa42f16e5b3_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,276 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112713d7ae4d23824f72a917b7d499ee2401_0eeb0588edb2caebe71f8272627a699d to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112713d7ae4d23824f72a917b7d499ee2401_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,277 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127349c87c29ae14800981707e8e49a2c8a_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127349c87c29ae14800981707e8e49a2c8a_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,279 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112734df2eb04ce749deb57f8cd8aaa5db9c_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112734df2eb04ce749deb57f8cd8aaa5db9c_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,281 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273b650cec56d341ad9a0e4eee8312be51_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273b650cec56d341ad9a0e4eee8312be51_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,282 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273bc8afd2a7cb4c77bb56b921c2ff9998_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273bc8afd2a7cb4c77bb56b921c2ff9998_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,284 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273ce063eebbe74b41bfc80a429b921f49_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273ce063eebbe74b41bfc80a429b921f49_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,286 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127549c59aa3e5f469a8933dec924295282_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127549c59aa3e5f469a8933dec924295282_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,287 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275c73d8a97b7d4c6ca2e70233c29dab57_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275c73d8a97b7d4c6ca2e70233c29dab57_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,289 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275d7195e73ff746b48cf94908f7447f4d_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275d7195e73ff746b48cf94908f7447f4d_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,291 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112774a504e677184d61ab5651d265c65e39_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112774a504e677184d61ab5651d265c65e39_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,292 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278058163c424643f4bee84449b65d5b53_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278058163c424643f4bee84449b65d5b53_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,293 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112780b52c9e2f824935b1c88aed6157a838_0eeb0588edb2caebe71f8272627a699d to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112780b52c9e2f824935b1c88aed6157a838_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,295 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279116acfc9e15435a906593982e14e41e_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279116acfc9e15435a906593982e14e41e_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,298 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a652f99f908f448c973174fbf436e7bb_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a652f99f908f448c973174fbf436e7bb_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,300 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a90cee46f3b544b6bd184d641c17c2bd_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a90cee46f3b544b6bd184d641c17c2bd_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,301 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b7f280e6209948da9ba3614bf9262889_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b7f280e6209948da9ba3614bf9262889_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,303 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127cb4cfdfdb3cc4426808f0e7ffa337fa6_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127cb4cfdfdb3cc4426808f0e7ffa337fa6_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,305 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127dcf983f2b83e4b57a743638fea08831d_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127dcf983f2b83e4b57a743638fea08831d_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,306 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e07c85fa0a7747aa8d7b533b027c3e40_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e07c85fa0a7747aa8d7b533b027c3e40_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,308 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e0d4e0094ee24404bb0994718e3995c5_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e0d4e0094ee24404bb0994718e3995c5_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,309 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e3180e6c5ed646aeb0c5f5a5511892f8_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e3180e6c5ed646aeb0c5f5a5511892f8_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,311 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127ed5a2ed31c9a4978bb9701999760d814_0eeb0588edb2caebe71f8272627a699d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127ed5a2ed31c9a4978bb9701999760d814_0eeb0588edb2caebe71f8272627a699d 2024-11-27T16:22:03,312 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-27T16:22:03,314 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=65, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:03,318 WARN 
[PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-27T16:22:03,320 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-27T16:22:03,321 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=65, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:03,321 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-27T16:22:03,322 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732724523321"}]},"ts":"9223372036854775807"} 2024-11-27T16:22:03,324 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-27T16:22:03,324 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 0eeb0588edb2caebe71f8272627a699d, NAME => 'TestAcidGuarantees,,1732724489323.0eeb0588edb2caebe71f8272627a699d.', STARTKEY => '', ENDKEY => ''}] 2024-11-27T16:22:03,324 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-27T16:22:03,325 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732724523324"}]},"ts":"9223372036854775807"} 2024-11-27T16:22:03,329 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-27T16:22:03,332 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=65, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:03,333 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 90 msec 2024-11-27T16:22:03,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-27T16:22:03,346 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-11-27T16:22:03,359 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=237 (was 238), OpenFileDescriptor=450 (was 450), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=404 (was 322) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5102 (was 4539) - AvailableMemoryMB LEAK? - 2024-11-27T16:22:03,371 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=237, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=404, ProcessCount=11, AvailableMemoryMB=5101 2024-11-27T16:22:03,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
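[Aside, not part of the log] The DISABLE and DELETE operations that just completed (procIds 61 and 65) are issued by the test through the synchronous Admin API, and the MEMSTORE_FLUSHSIZE warning refers to the "hbase.hregion.memstore.flush.size" value, which is far below the 128 MB default here, presumably lowered on purpose to force frequent flushes. A minimal client-side sketch of that teardown, assuming a standard HBase 2.x client on the classpath (class name and configuration handling are illustrative, not taken from the test source):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropAcidTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // "hbase.hregion.memstore.flush.size" is the property named in the
        // TableDescriptorChecker warning above; 131072 bytes is tiny compared
        // to the 128 MB default, which is why the checker flags it.
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          if (admin.tableExists(table)) {
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);   // DisableTableProcedure, pid=61 in the log
            }
            admin.deleteTable(table);      // DeleteTableProcedure, pid=65 in the log
          }
        }
      }
    }
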
2024-11-27T16:22:03,374 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T16:22:03,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=66, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:03,376 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T16:22:03,376 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:03,377 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 66 2024-11-27T16:22:03,377 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T16:22:03,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-27T16:22:03,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742086_1262 (size=960) 2024-11-27T16:22:03,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-27T16:22:03,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-27T16:22:03,793 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59 2024-11-27T16:22:03,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742087_1263 (size=53) 2024-11-27T16:22:03,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-27T16:22:04,206 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:22:04,206 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing b7c24f821c64d1ed1608bef04711b574, disabling compactions & flushes 2024-11-27T16:22:04,206 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:04,206 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:04,206 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. after waiting 0 ms 2024-11-27T16:22:04,206 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:04,206 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
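[Aside, not part of the log] The create request above spells out the table descriptor in shell notation; the same descriptor can be assembled with the Java client's builder API. A sketch under the assumption that an open Admin handle is available (class and method names here are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateAcidTable {
      static void create(Admin admin) throws IOException {
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level metadata key shown in the create log; selects the
            // BASIC in-memory compaction policy (CompactingMemStore).
            .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
        for (String family : new String[] { "A", "B", "C" }) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)         // VERSIONS => '1'
              .setBlocksize(64 * 1024)   // BLOCKSIZE => '65536'
              .build());
        }
        TableDescriptor td = builder.build();
        admin.createTable(td);           // stored as CreateTableProcedure pid=66 above
      }
    }
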
2024-11-27T16:22:04,206 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:04,208 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T16:22:04,208 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732724524208"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732724524208"}]},"ts":"1732724524208"} 2024-11-27T16:22:04,209 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-27T16:22:04,210 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T16:22:04,210 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724524210"}]},"ts":"1732724524210"} 2024-11-27T16:22:04,211 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-27T16:22:04,217 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7c24f821c64d1ed1608bef04711b574, ASSIGN}] 2024-11-27T16:22:04,218 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7c24f821c64d1ed1608bef04711b574, ASSIGN 2024-11-27T16:22:04,219 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7c24f821c64d1ed1608bef04711b574, ASSIGN; state=OFFLINE, location=7b191dec6496,44169,1732724452967; forceNewPlan=false, retain=false 2024-11-27T16:22:04,369 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=b7c24f821c64d1ed1608bef04711b574, regionState=OPENING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:22:04,371 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; OpenRegionProcedure b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:22:04,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-27T16:22:04,522 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:04,526 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
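[Aside, not part of the log] The repeated "Checking to see if procedure is done pid=66" lines are the client-side table future polling the master until the CreateTableProcedure finishes; Admin.createTable() already blocks on that future. For callers that want an explicit readiness check afterwards, a helper along these lines would work (a sketch; the timeout and sleep values are arbitrary, only the isTableAvailable call is the real API):

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    final class TableReadiness {
      static void awaitAvailable(Admin admin, String name, long timeoutMinutes)
          throws IOException, InterruptedException {
        TableName table = TableName.valueOf(name);
        long deadline = System.currentTimeMillis() + TimeUnit.MINUTES.toMillis(timeoutMinutes);
        while (!admin.isTableAvailable(table)) {
          if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException(name + " did not become available in time");
          }
          Thread.sleep(200);   // the log shows the client backing off between polls
        }
      }
    }
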
2024-11-27T16:22:04,526 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7285): Opening region: {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} 2024-11-27T16:22:04,527 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:04,527 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:22:04,527 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7327): checking encryption for b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:04,527 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7330): checking classloading for b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:04,528 INFO [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:04,530 INFO [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:22:04,530 INFO [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b7c24f821c64d1ed1608bef04711b574 columnFamilyName A 2024-11-27T16:22:04,530 DEBUG [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:04,531 INFO [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] regionserver.HStore(327): Store=b7c24f821c64d1ed1608bef04711b574/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:22:04,531 INFO [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:04,532 INFO [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:22:04,533 INFO [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b7c24f821c64d1ed1608bef04711b574 columnFamilyName B 2024-11-27T16:22:04,533 DEBUG [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:04,533 INFO [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] regionserver.HStore(327): Store=b7c24f821c64d1ed1608bef04711b574/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:22:04,533 INFO [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:04,535 INFO [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:22:04,535 INFO [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b7c24f821c64d1ed1608bef04711b574 columnFamilyName C 2024-11-27T16:22:04,535 DEBUG [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:04,535 INFO [StoreOpener-b7c24f821c64d1ed1608bef04711b574-1 {}] regionserver.HStore(327): Store=b7c24f821c64d1ed1608bef04711b574/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:22:04,536 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:04,536 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:04,537 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:04,538 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T16:22:04,540 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1085): writing seq id for b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:04,543 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T16:22:04,544 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1102): Opened b7c24f821c64d1ed1608bef04711b574; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65983963, jitterRate=-0.016762331128120422}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T16:22:04,545 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1001): Region open journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:04,545 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., pid=68, masterSystemTime=1732724524522 2024-11-27T16:22:04,547 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:04,547 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
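[Aside, not part of the log] Each store above is opened with memstore type=CompactingMemStore and compactor=BASIC, driven by the table-level 'hbase.hregion.compacting.memstore.type' value set at creation time. The same policy can also be requested per column family; a hedged sketch, assuming the HBase 2.x ColumnFamilyDescriptorBuilder.setInMemoryCompaction setter:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class InMemoryCompactionFamilies {
      // Per-family alternative to the table-level metadata key used when the
      // table was created; produces the same CompactingMemStore/BASIC stores.
      static ColumnFamilyDescriptor basicFamily(String name) {
        return ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(name))
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
            .build();
      }
    }
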
2024-11-27T16:22:04,547 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=b7c24f821c64d1ed1608bef04711b574, regionState=OPEN, openSeqNum=2, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:22:04,550 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-27T16:22:04,550 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; OpenRegionProcedure b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 in 177 msec 2024-11-27T16:22:04,552 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=66 2024-11-27T16:22:04,552 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=66, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7c24f821c64d1ed1608bef04711b574, ASSIGN in 333 msec 2024-11-27T16:22:04,552 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T16:22:04,553 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724524552"}]},"ts":"1732724524552"} 2024-11-27T16:22:04,553 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-27T16:22:04,560 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T16:22:04,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1860 sec 2024-11-27T16:22:05,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-27T16:22:05,484 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 66 completed 2024-11-27T16:22:05,486 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x473dfbd2 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@320146a2 2024-11-27T16:22:05,492 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fe4ac0b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:05,493 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:05,495 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54190, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:05,496 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T16:22:05,497 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59814, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T16:22:05,499 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63a751b9 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bab3f39 2024-11-27T16:22:05,504 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@718544b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:05,505 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x05b4256e to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e48016 2024-11-27T16:22:05,509 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c9ffc85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:05,510 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18ed3e4c to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b2ae977 2024-11-27T16:22:05,515 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18724143, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:05,517 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62de434f to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@ed37f32 2024-11-27T16:22:05,521 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4215ff2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:05,522 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x07575b91 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12e88ea6 2024-11-27T16:22:05,527 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cb89dc6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:05,528 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7dfc4f36 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@25f2abe2 2024-11-27T16:22:05,538 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a724365, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:05,539 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3505ffc0 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@598ef39 2024-11-27T16:22:05,542 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d59ed84, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:05,543 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61f33e78 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@23d0f458 2024-11-27T16:22:05,550 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4187186b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:05,551 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x532e5d9f to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@77780196 2024-11-27T16:22:05,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ec51b52, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:05,569 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31dd347a to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69ef766 2024-11-27T16:22:05,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20a7636c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:05,600 DEBUG [hconnection-0x52f7c385-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:05,601 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54196, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:05,605 DEBUG [hconnection-0x4b02cfaa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:05,606 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54202, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:05,612 DEBUG [hconnection-0x68a2ff28-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:05,614 INFO [RS-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54204, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:05,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:05,620 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:22:05,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:05,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:05,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:05,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:05,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:05,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:05,629 DEBUG [hconnection-0x7caf21b4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:05,630 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:05,641 DEBUG [hconnection-0x606487b4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:05,643 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54212, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:05,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:05,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724585665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:05,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:05,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724585666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:05,700 DEBUG [hconnection-0x80fa607-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:05,700 DEBUG [hconnection-0x45f1c47d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:05,701 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54216, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:05,701 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54220, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:05,711 DEBUG [hconnection-0x587bab53-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:05,712 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54228, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:05,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:05,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724585714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:05,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/4e78b58eeac441dc825ff749aec29387 is 50, key is test_row_0/A:col10/1732724525609/Put/seqid=0 2024-11-27T16:22:05,759 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:05,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-11-27T16:22:05,761 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:05,762 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:05,762 DEBUG [hconnection-0x5c24314-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:05,762 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:05,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-27T16:22:05,764 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54232, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:05,772 DEBUG [hconnection-0x757aa44f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:05,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:05,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54232 deadline: 1732724585769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:05,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:05,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724585770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:05,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:05,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724585771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:05,774 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:05,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:05,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724585775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:05,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742088_1264 (size=12001) 2024-11-27T16:22:05,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:05,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724585821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:05,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-27T16:22:05,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:05,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54232 deadline: 1732724585878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:05,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:05,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724585881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:05,919 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:05,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-27T16:22:05,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:05,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:05,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:05,920 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:05,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:05,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:05,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:05,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724585975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:05,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:05,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724585981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724586031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,076 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,077 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-27T16:22:06,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
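The Mutate calls rejected above all fail with RegionTooBusyException because the region's memstore is over its 512.0 K blocking limit while a flush is still in flight. Below is a minimal client-side sketch of what a writer thread hitting this server would look like, with an explicit retry and backoff around the put. The table, row, family, and qualifier names are taken from the log; the retry count and sleep interval are assumptions, and the stock HBase client typically retries this exception internally on its own.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retry with a simple backoff when the region reports it is over its
      // blocking memstore limit and the in-flight flush has not caught up yet.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;                    // give up after a few attempts
          }
          Thread.sleep(100L * attempt); // assumed backoff, not taken from the log
        }
      }
    }
  }
}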
2024-11-27T16:22:06,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:06,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:06,077 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:06,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
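The pid=69/70 activity in this stretch is the client-requested table flush: the master stores a FlushTableProcedure, spawns a FlushRegionProcedure for the region, and the region server's FlushRegionCallable keeps failing with "already flushing" because MemStoreFlusher is still writing that same region, so the master records the failure and re-dispatches it. A hedged sketch of the admin call that kicks this off is below; the class name and structure are illustrative, not taken from the test source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the caller then
      // waits while the master reports procedure progress (the repeated
      // "Checking to see if procedure is done pid=69" lines in the log).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The equivalent HBase shell command would be flush 'TestAcidGuarantees'.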
2024-11-27T16:22:06,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:06,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54232 deadline: 1732724586080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724586090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-27T16:22:06,216 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/4e78b58eeac441dc825ff749aec29387 2024-11-27T16:22:06,229 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-27T16:22:06,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:06,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:06,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:06,230 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:06,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:06,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:06,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/26ec322b7e9b401cbdb31aa337748eb2 is 50, key is test_row_0/B:col10/1732724525609/Put/seqid=0 2024-11-27T16:22:06,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724586292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724586292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742089_1265 (size=12001) 2024-11-27T16:22:06,305 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/26ec322b7e9b401cbdb31aa337748eb2 2024-11-27T16:22:06,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724586343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,384 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,385 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-27T16:22:06,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:06,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:06,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:06,385 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
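The 512.0 K blocking limit that every rejected Mutate reports is the region's memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; this test run presumably shrinks those values so that flushes and write pressure interleave aggressively. The sketch below shows how such a small limit could be configured; the concrete numbers are assumptions chosen to reproduce the 512 KB figure, and the actual values used by this run are not visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values only: a 128 KB flush size with the default block
    // multiplier of 4 yields the 512 KB blocking limit seen in the log
    // (blocking limit = flush size * multiplier).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore limit = " + blocking + " bytes"); // 524288
  }
}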
2024-11-27T16:22:06,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:06,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:06,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/c7b2d77f530548e992e33fe0f81f7a3c is 50, key is test_row_0/C:col10/1732724525609/Put/seqid=0 2024-11-27T16:22:06,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54232 deadline: 1732724586385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724586396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-27T16:22:06,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742090_1266 (size=12001) 2024-11-27T16:22:06,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/c7b2d77f530548e992e33fe0f81f7a3c 2024-11-27T16:22:06,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/4e78b58eeac441dc825ff749aec29387 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4e78b58eeac441dc825ff749aec29387 2024-11-27T16:22:06,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4e78b58eeac441dc825ff749aec29387, entries=150, sequenceid=12, filesize=11.7 K 2024-11-27T16:22:06,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/26ec322b7e9b401cbdb31aa337748eb2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/26ec322b7e9b401cbdb31aa337748eb2 2024-11-27T16:22:06,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/26ec322b7e9b401cbdb31aa337748eb2, entries=150, sequenceid=12, filesize=11.7 K 2024-11-27T16:22:06,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/c7b2d77f530548e992e33fe0f81f7a3c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/c7b2d77f530548e992e33fe0f81f7a3c 2024-11-27T16:22:06,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/c7b2d77f530548e992e33fe0f81f7a3c, entries=150, sequenceid=12, filesize=11.7 K 2024-11-27T16:22:06,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b7c24f821c64d1ed1608bef04711b574 in 823ms, sequenceid=12, compaction requested=false 2024-11-27T16:22:06,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:06,548 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-27T16:22:06,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:06,549 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:22:06,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:06,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:06,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:06,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:06,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:06,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:06,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/8cd593c8ebd54ffd91eea37108837286 is 50, key is test_row_0/A:col10/1732724525664/Put/seqid=0 2024-11-27T16:22:06,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742091_1267 (size=12001) 2024-11-27T16:22:06,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:06,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:06,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724586815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724586817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724586849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54232 deadline: 1732724586891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-27T16:22:06,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724586908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724586920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:06,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724586921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:06,968 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/8cd593c8ebd54ffd91eea37108837286 2024-11-27T16:22:06,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/52b1bd2d6ff64bce95bd99a81012fbe3 is 50, key is test_row_0/B:col10/1732724525664/Put/seqid=0 2024-11-27T16:22:07,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742092_1268 (size=12001) 2024-11-27T16:22:07,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:07,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724587125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:07,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:07,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724587125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:07,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:07,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724587428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:07,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:07,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724587430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:07,466 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/52b1bd2d6ff64bce95bd99a81012fbe3 2024-11-27T16:22:07,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/f3449e25f1e94908b4f15b407ce83594 is 50, key is test_row_0/C:col10/1732724525664/Put/seqid=0 2024-11-27T16:22:07,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742093_1269 (size=12001) 2024-11-27T16:22:07,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:07,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724587852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:07,885 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/f3449e25f1e94908b4f15b407ce83594 2024-11-27T16:22:07,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/8cd593c8ebd54ffd91eea37108837286 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8cd593c8ebd54ffd91eea37108837286 2024-11-27T16:22:07,897 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8cd593c8ebd54ffd91eea37108837286, entries=150, sequenceid=37, filesize=11.7 K 2024-11-27T16:22:07,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/52b1bd2d6ff64bce95bd99a81012fbe3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/52b1bd2d6ff64bce95bd99a81012fbe3 2024-11-27T16:22:07,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:07,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54232 deadline: 1732724587898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:07,906 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/52b1bd2d6ff64bce95bd99a81012fbe3, entries=150, sequenceid=37, filesize=11.7 K 2024-11-27T16:22:07,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/f3449e25f1e94908b4f15b407ce83594 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f3449e25f1e94908b4f15b407ce83594 2024-11-27T16:22:07,913 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f3449e25f1e94908b4f15b407ce83594, entries=150, sequenceid=37, filesize=11.7 K 2024-11-27T16:22:07,914 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b7c24f821c64d1ed1608bef04711b574 in 1365ms, sequenceid=37, compaction requested=false 2024-11-27T16:22:07,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:07,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
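The 512.0 K limit quoted in every RegionTooBusyException above is the region's blocking memstore size, which HBase derives from the memstore flush size multiplied by the block multiplier; once a flush like pid=70 finishes (~147.60 KB flushed at sequenceid=37 above) and usage drops back under that product, writes resume. A minimal sketch of the two server-side settings involved, shown with their usual defaults rather than the deliberately tiny values this test configures:

    Configuration conf = HBaseConfiguration.create();
    // Memstore size at which a flush is requested (default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore exceeds
    // flush.size * block.multiplier (the multiplier defaults to 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);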
2024-11-27T16:22:07,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-27T16:22:07,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-11-27T16:22:07,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-27T16:22:07,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-27T16:22:07,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1540 sec 2024-11-27T16:22:07,920 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 2.1590 sec 2024-11-27T16:22:07,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:07,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:22:07,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:07,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:07,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:07,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:07,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:07,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:07,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/4fb7ded2bd00458daca6d35ef8c085bb is 50, key is test_row_0/A:col10/1732724526810/Put/seqid=0 2024-11-27T16:22:07,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742094_1270 (size=12001) 2024-11-27T16:22:07,949 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/4fb7ded2bd00458daca6d35ef8c085bb 2024-11-27T16:22:07,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/2913bfac8992482b90a9c90794354571 is 50, key is test_row_0/B:col10/1732724526810/Put/seqid=0 
2024-11-27T16:22:07,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742095_1271 (size=12001) 2024-11-27T16:22:07,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/2913bfac8992482b90a9c90794354571 2024-11-27T16:22:07,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:07,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724587972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:07,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:07,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724587976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:07,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724587980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:07,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/8e4425f92f504effb3974aa76da9edb1 is 50, key is test_row_0/C:col10/1732724526810/Put/seqid=0 2024-11-27T16:22:08,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742096_1272 (size=12001) 2024-11-27T16:22:08,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724588075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724588079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724588083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,265 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-27T16:22:08,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724588280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724588282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724588287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/8e4425f92f504effb3974aa76da9edb1 2024-11-27T16:22:08,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/4fb7ded2bd00458daca6d35ef8c085bb as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4fb7ded2bd00458daca6d35ef8c085bb 2024-11-27T16:22:08,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4fb7ded2bd00458daca6d35ef8c085bb, entries=150, sequenceid=49, filesize=11.7 K 2024-11-27T16:22:08,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/2913bfac8992482b90a9c90794354571 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/2913bfac8992482b90a9c90794354571 2024-11-27T16:22:08,437 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/2913bfac8992482b90a9c90794354571, entries=150, sequenceid=49, filesize=11.7 K 2024-11-27T16:22:08,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/8e4425f92f504effb3974aa76da9edb1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/8e4425f92f504effb3974aa76da9edb1 2024-11-27T16:22:08,443 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/8e4425f92f504effb3974aa76da9edb1, entries=150, sequenceid=49, filesize=11.7 K 2024-11-27T16:22:08,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for b7c24f821c64d1ed1608bef04711b574 in 514ms, sequenceid=49, compaction requested=true 2024-11-27T16:22:08,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:08,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:08,444 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:08,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:08,444 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:08,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:08,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:08,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:08,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:08,445 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:08,446 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/A is initiating minor compaction (all files) 2024-11-27T16:22:08,446 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/A in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
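
[Editor's note, not part of the captured log] The repeated "Over memstore limit=512.0 K" rejections above come from the region's blocking memstore limit, which HBase derives as the configured flush size multiplied by the block multiplier; the following is a minimal Java sketch of that arithmetic. The two property names are standard HBase configuration keys, but the small values are assumptions chosen only to reproduce a 512 KB limit like the one in this test, not values read from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-scale values; the defaults are 128 MB and 4 respectively.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // 128 KB (assumption)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Writes to the region are rejected with RegionTooBusyException once its
    // memstore exceeds flushSize * multiplier (512 KB with the values above),
    // until a flush such as the one logged above brings it back under the limit.
    System.out.println("blocking memstore limit = " + (flushSize * multiplier) + " bytes");
  }
}
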
2024-11-27T16:22:08,446 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4e78b58eeac441dc825ff749aec29387, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8cd593c8ebd54ffd91eea37108837286, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4fb7ded2bd00458daca6d35ef8c085bb] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=35.2 K 2024-11-27T16:22:08,446 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:08,446 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/B is initiating minor compaction (all files) 2024-11-27T16:22:08,446 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/B in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:08,446 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/26ec322b7e9b401cbdb31aa337748eb2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/52b1bd2d6ff64bce95bd99a81012fbe3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/2913bfac8992482b90a9c90794354571] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=35.2 K 2024-11-27T16:22:08,447 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e78b58eeac441dc825ff749aec29387, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732724525609 2024-11-27T16:22:08,447 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8cd593c8ebd54ffd91eea37108837286, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732724525659 2024-11-27T16:22:08,447 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 26ec322b7e9b401cbdb31aa337748eb2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732724525609 2024-11-27T16:22:08,447 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 52b1bd2d6ff64bce95bd99a81012fbe3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732724525659 2024-11-27T16:22:08,447 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 4fb7ded2bd00458daca6d35ef8c085bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732724526810 2024-11-27T16:22:08,448 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2913bfac8992482b90a9c90794354571, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732724526810 2024-11-27T16:22:08,458 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#A#compaction#222 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:08,459 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/b54f9db04aa94fb09f6ef783367fc4d5 is 50, key is test_row_0/A:col10/1732724526810/Put/seqid=0 2024-11-27T16:22:08,462 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#B#compaction#223 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:08,462 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/8b4e9d3c36ab4652bde3c0550dff57f3 is 50, key is test_row_0/B:col10/1732724526810/Put/seqid=0 2024-11-27T16:22:08,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742097_1273 (size=12104) 2024-11-27T16:22:08,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742098_1274 (size=12104) 2024-11-27T16:22:08,498 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/b54f9db04aa94fb09f6ef783367fc4d5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/b54f9db04aa94fb09f6ef783367fc4d5 2024-11-27T16:22:08,509 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/A of b7c24f821c64d1ed1608bef04711b574 into b54f9db04aa94fb09f6ef783367fc4d5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
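
[Editor's note, not part of the captured log] The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries above reflect a ratio check on candidate store files: a candidate set is "in ratio" when no single file is larger than the configured ratio times the combined size of the other files in the set. The class below is a simplified illustration of that check, not HBase's actual policy implementation; the file sizes and the ratio value are assumptions mirroring the ~12 KB flush outputs seen in this log.

import java.util.List;

public class ExploringSelectionSketch {
  // Returns true when every file is no larger than ratio * (sum of the others).
  static boolean inRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false; // one file dominates the candidate set; skip it
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Three similarly sized flush outputs, as in the log above, with an assumed ratio of 1.2.
    List<Long> sizes = List.of(12_001L, 12_001L, 12_001L);
    System.out.println("in ratio: " + inRatio(sizes, 1.2)); // true -> all three files compacted together
  }
}
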
2024-11-27T16:22:08,509 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:08,509 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/A, priority=13, startTime=1732724528444; duration=0sec 2024-11-27T16:22:08,509 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:08,509 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:A 2024-11-27T16:22:08,509 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:08,511 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:08,511 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/C is initiating minor compaction (all files) 2024-11-27T16:22:08,511 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/C in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:08,511 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/8b4e9d3c36ab4652bde3c0550dff57f3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8b4e9d3c36ab4652bde3c0550dff57f3 2024-11-27T16:22:08,511 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/c7b2d77f530548e992e33fe0f81f7a3c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f3449e25f1e94908b4f15b407ce83594, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/8e4425f92f504effb3974aa76da9edb1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=35.2 K 2024-11-27T16:22:08,512 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7b2d77f530548e992e33fe0f81f7a3c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732724525609 2024-11-27T16:22:08,512 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3449e25f1e94908b4f15b407ce83594, 
keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732724525659 2024-11-27T16:22:08,513 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e4425f92f504effb3974aa76da9edb1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732724526810 2024-11-27T16:22:08,521 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/B of b7c24f821c64d1ed1608bef04711b574 into 8b4e9d3c36ab4652bde3c0550dff57f3(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:08,521 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:08,521 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/B, priority=13, startTime=1732724528444; duration=0sec 2024-11-27T16:22:08,521 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:08,521 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:B 2024-11-27T16:22:08,528 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#C#compaction#224 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:08,530 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/e9b048e844f3430b8e8f7f1991641724 is 50, key is test_row_0/C:col10/1732724526810/Put/seqid=0 2024-11-27T16:22:08,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742099_1275 (size=12104) 2024-11-27T16:22:08,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:08,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-27T16:22:08,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:08,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:08,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:08,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:08,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:08,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:08,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724588600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724588601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724588602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/8de5f716b98d42a785db0313ffa02aa9 is 50, key is test_row_0/A:col10/1732724528584/Put/seqid=0 2024-11-27T16:22:08,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742100_1276 (size=12001) 2024-11-27T16:22:08,632 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/8de5f716b98d42a785db0313ffa02aa9 2024-11-27T16:22:08,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/4189e9ad5d4544c1a7bfc5427f0bddb1 is 50, key is test_row_0/B:col10/1732724528584/Put/seqid=0 2024-11-27T16:22:08,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742101_1277 (size=12001) 2024-11-27T16:22:08,662 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/4189e9ad5d4544c1a7bfc5427f0bddb1 2024-11-27T16:22:08,671 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/d10eb6c22482409da5ebd12481aeb49b is 50, key is test_row_0/C:col10/1732724528584/Put/seqid=0 2024-11-27T16:22:08,682 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742102_1278 (size=12001) 2024-11-27T16:22:08,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/d10eb6c22482409da5ebd12481aeb49b 2024-11-27T16:22:08,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/8de5f716b98d42a785db0313ffa02aa9 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8de5f716b98d42a785db0313ffa02aa9 2024-11-27T16:22:08,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8de5f716b98d42a785db0313ffa02aa9, entries=150, sequenceid=77, filesize=11.7 K 2024-11-27T16:22:08,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/4189e9ad5d4544c1a7bfc5427f0bddb1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/4189e9ad5d4544c1a7bfc5427f0bddb1 2024-11-27T16:22:08,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/4189e9ad5d4544c1a7bfc5427f0bddb1, entries=150, sequenceid=77, filesize=11.7 K 2024-11-27T16:22:08,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724588705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/d10eb6c22482409da5ebd12481aeb49b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d10eb6c22482409da5ebd12481aeb49b 2024-11-27T16:22:08,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724588705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724588706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,712 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d10eb6c22482409da5ebd12481aeb49b, entries=150, sequenceid=77, filesize=11.7 K 2024-11-27T16:22:08,713 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for b7c24f821c64d1ed1608bef04711b574 in 128ms, sequenceid=77, compaction requested=false 2024-11-27T16:22:08,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:08,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:08,910 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:22:08,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:08,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:08,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:08,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:08,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:08,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:08,915 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/4369c3d48889435783f3fdc5cfbbbe02 is 50, key is test_row_0/A:col10/1732724528908/Put/seqid=0 2024-11-27T16:22:08,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742103_1279 (size=12001) 2024-11-27T16:22:08,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/4369c3d48889435783f3fdc5cfbbbe02 2024-11-27T16:22:08,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724588942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724588943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:08,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724588945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:08,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/8c1c44b782304801bec31dc57b129761 is 50, key is test_row_0/B:col10/1732724528908/Put/seqid=0 2024-11-27T16:22:08,960 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/e9b048e844f3430b8e8f7f1991641724 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/e9b048e844f3430b8e8f7f1991641724 2024-11-27T16:22:08,974 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/C of b7c24f821c64d1ed1608bef04711b574 into e9b048e844f3430b8e8f7f1991641724(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
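
[Editor's note, not part of the captured log] The rejected Mutate calls logged above originate from ordinary client-side Table.put() operations; RegionTooBusyException is a retryable exception, so the client normally retries such puts (up to hbase.client.retries.number, with backoff) while the server flushes, rather than failing immediately. The sketch below is an assumed, illustrative view of what one of the test's writer calls roughly looks like; the table, row, family, and qualifier names mirror the log, while the value is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // If the target region is over its blocking memstore limit, this call is
      // retried internally; it only surfaces an error after retries are exhausted.
      table.put(put);
    }
  }
}
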
2024-11-27T16:22:08,974 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:08,974 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/C, priority=13, startTime=1732724528445; duration=0sec 2024-11-27T16:22:08,974 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:08,974 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:C 2024-11-27T16:22:08,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742104_1280 (size=12001) 2024-11-27T16:22:08,999 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/8c1c44b782304801bec31dc57b129761 2024-11-27T16:22:09,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/66108b2253154ade8c8f37ea74e8b530 is 50, key is test_row_0/C:col10/1732724528908/Put/seqid=0 2024-11-27T16:22:09,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742105_1281 (size=12001) 2024-11-27T16:22:09,038 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/66108b2253154ade8c8f37ea74e8b530 2024-11-27T16:22:09,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/4369c3d48889435783f3fdc5cfbbbe02 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4369c3d48889435783f3fdc5cfbbbe02 2024-11-27T16:22:09,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724589047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4369c3d48889435783f3fdc5cfbbbe02, entries=150, sequenceid=89, filesize=11.7 K 2024-11-27T16:22:09,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724589048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/8c1c44b782304801bec31dc57b129761 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8c1c44b782304801bec31dc57b129761 2024-11-27T16:22:09,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724589053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,057 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8c1c44b782304801bec31dc57b129761, entries=150, sequenceid=89, filesize=11.7 K 2024-11-27T16:22:09,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/66108b2253154ade8c8f37ea74e8b530 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/66108b2253154ade8c8f37ea74e8b530 2024-11-27T16:22:09,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/66108b2253154ade8c8f37ea74e8b530, entries=150, sequenceid=89, filesize=11.7 K 2024-11-27T16:22:09,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b7c24f821c64d1ed1608bef04711b574 in 154ms, sequenceid=89, compaction requested=true 2024-11-27T16:22:09,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:09,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:09,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:09,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:09,063 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:09,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), 
splitQueue=0 2024-11-27T16:22:09,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:09,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:22:09,063 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:09,065 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:09,066 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/A is initiating minor compaction (all files) 2024-11-27T16:22:09,066 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/A in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:09,066 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/b54f9db04aa94fb09f6ef783367fc4d5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8de5f716b98d42a785db0313ffa02aa9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4369c3d48889435783f3fdc5cfbbbe02] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=35.3 K 2024-11-27T16:22:09,066 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:09,066 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/B is initiating minor compaction (all files) 2024-11-27T16:22:09,066 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/B in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
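The RegionTooBusyException warnings above are thrown by HRegion.checkResources once the region's memstore exceeds its blocking size (512.0 K in this run), and they only stop once the MemStoreFlusher entries finish flushing. As a minimal sketch, assuming a stock HBase 2.x configuration rather than whatever shrunken limits this test uses, the blocking size is derived from two standard settings (flush size times block multiplier); the values below are the usual defaults, shown for illustration only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: the memstore size at which writes are rejected with
    // RegionTooBusyException is flush size * block multiplier.
    public class MemstoreBlockingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore flush threshold (default 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writes are blocked once the memstore reaches flush.size * multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
      }
    }

The 512.0 K limit is reached within seconds here presumably because the test lowers these settings; with the defaults above the same mechanism would only trigger at several hundred megabytes per region.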
2024-11-27T16:22:09,066 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8b4e9d3c36ab4652bde3c0550dff57f3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/4189e9ad5d4544c1a7bfc5427f0bddb1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8c1c44b782304801bec31dc57b129761] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=35.3 K 2024-11-27T16:22:09,067 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b54f9db04aa94fb09f6ef783367fc4d5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732724526810 2024-11-27T16:22:09,067 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b4e9d3c36ab4652bde3c0550dff57f3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732724526810 2024-11-27T16:22:09,068 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8de5f716b98d42a785db0313ffa02aa9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732724527957 2024-11-27T16:22:09,069 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4189e9ad5d4544c1a7bfc5427f0bddb1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732724527957 2024-11-27T16:22:09,069 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4369c3d48889435783f3fdc5cfbbbe02, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732724528600 2024-11-27T16:22:09,070 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c1c44b782304801bec31dc57b129761, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732724528600 2024-11-27T16:22:09,081 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#A#compaction#231 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:09,082 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/0f021abf0a7a44fb92a0989baec0189c is 50, key is test_row_0/A:col10/1732724528908/Put/seqid=0 2024-11-27T16:22:09,094 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#B#compaction#232 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:09,095 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/a070bef300bd459aa9b05a97b0138220 is 50, key is test_row_0/B:col10/1732724528908/Put/seqid=0 2024-11-27T16:22:09,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742106_1282 (size=12207) 2024-11-27T16:22:09,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742107_1283 (size=12207) 2024-11-27T16:22:09,139 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/a070bef300bd459aa9b05a97b0138220 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/a070bef300bd459aa9b05a97b0138220 2024-11-27T16:22:09,153 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/B of b7c24f821c64d1ed1608bef04711b574 into a070bef300bd459aa9b05a97b0138220(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:09,153 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:09,153 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/B, priority=13, startTime=1732724529063; duration=0sec 2024-11-27T16:22:09,153 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:09,153 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:B 2024-11-27T16:22:09,153 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:09,155 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:09,155 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/C is initiating minor compaction (all files) 2024-11-27T16:22:09,155 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/C in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
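The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" lines show ExploringCompactionPolicy kicking in as soon as each store accumulates three flushed files. A hedged sketch of the two settings behind those numbers, assuming current 2.x key names; the values simply mirror what the log reports:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: store-file counts that drive the compaction decisions logged above.
    public class CompactionThresholdSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum eligible files before a minor compaction is selected ("3 eligible").
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Store-file count at which further flushes are delayed ("16 blocking").
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min=" + conf.getInt("hbase.hstore.compaction.min", 0)
            + ", blockingStoreFiles=" + conf.getInt("hbase.hstore.blockingStoreFiles", 0));
      }
    }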
2024-11-27T16:22:09,155 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/e9b048e844f3430b8e8f7f1991641724, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d10eb6c22482409da5ebd12481aeb49b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/66108b2253154ade8c8f37ea74e8b530] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=35.3 K 2024-11-27T16:22:09,156 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting e9b048e844f3430b8e8f7f1991641724, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732724526810 2024-11-27T16:22:09,156 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d10eb6c22482409da5ebd12481aeb49b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732724527957 2024-11-27T16:22:09,157 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 66108b2253154ade8c8f37ea74e8b530, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732724528600 2024-11-27T16:22:09,190 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#C#compaction#233 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:09,190 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/d37e6a157e1541e8805f98d659535b4f is 50, key is test_row_0/C:col10/1732724528908/Put/seqid=0 2024-11-27T16:22:09,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742108_1284 (size=12207) 2024-11-27T16:22:09,220 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/d37e6a157e1541e8805f98d659535b4f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d37e6a157e1541e8805f98d659535b4f 2024-11-27T16:22:09,229 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/C of b7c24f821c64d1ed1608bef04711b574 into d37e6a157e1541e8805f98d659535b4f(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
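The client-side retry traces further down (RpcRetryingCallerImpl: "tries=6, retries=16") come from the test's writer threads (AcidGuaranteesTestTool$AtomicityWriter) calling HTable.put while the region keeps answering RegionTooBusyException. A minimal sketch of such a writer with the standard 2.x client API; the table, family, row, and qualifier are taken from the log, while the value and pause are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: a writer in the spirit of AcidGuaranteesTestTool's AtomicityWriter.
    // RegionTooBusyException is retried inside the client, governed by
    // hbase.client.retries.number and hbase.client.pause.
    public class RetryingWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 16); // matches "retries=16" in the traces
        conf.setLong("hbase.client.pause", 100L);       // base backoff in ms (illustrative)

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_1"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          table.put(put); // blocks through RegionTooBusyException retries until success or exhaustion
        }
      }
    }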
2024-11-27T16:22:09,229 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:09,229 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/C, priority=13, startTime=1732724529063; duration=0sec 2024-11-27T16:22:09,231 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:09,231 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:C 2024-11-27T16:22:09,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:09,252 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:22:09,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:09,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:09,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:09,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:09,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:09,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:09,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/7150e27bc9714340a87474ebda76a204 is 50, key is test_row_0/A:col10/1732724529251/Put/seqid=0 2024-11-27T16:22:09,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724589262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724589264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724589265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742109_1285 (size=12001) 2024-11-27T16:22:09,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724589365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724589367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724589368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,524 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/0f021abf0a7a44fb92a0989baec0189c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/0f021abf0a7a44fb92a0989baec0189c 2024-11-27T16:22:09,529 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/A of b7c24f821c64d1ed1608bef04711b574 into 0f021abf0a7a44fb92a0989baec0189c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:09,529 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:09,529 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/A, priority=13, startTime=1732724529063; duration=0sec 2024-11-27T16:22:09,529 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:09,529 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:A 2024-11-27T16:22:09,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724589567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724589571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724589571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/7150e27bc9714340a87474ebda76a204 2024-11-27T16:22:09,677 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/fc4dec5df0fd4169bb5d3c3b98a566e5 is 50, key is test_row_0/B:col10/1732724529251/Put/seqid=0 2024-11-27T16:22:09,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742110_1286 (size=12001) 2024-11-27T16:22:09,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724589870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724589871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,873 DEBUG [Thread-1205 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4200 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:22:09,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724589874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724589879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:09,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54232 deadline: 1732724589911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:09,913 DEBUG [Thread-1207 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4233 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:22:09,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-27T16:22:09,924 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-27T16:22:09,926 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:09,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-27T16:22:09,927 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:09,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-27T16:22:09,928 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:09,928 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:10,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-27T16:22:10,081 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:10,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-27T16:22:10,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:10,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:10,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:10,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:10,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:10,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/fc4dec5df0fd4169bb5d3c3b98a566e5 2024-11-27T16:22:10,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:10,097 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/e711ee5275524fd3a4130d0eb5f7c387 is 50, key is test_row_0/C:col10/1732724529251/Put/seqid=0 2024-11-27T16:22:10,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742111_1287 (size=12001) 2024-11-27T16:22:10,138 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/e711ee5275524fd3a4130d0eb5f7c387 2024-11-27T16:22:10,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/7150e27bc9714340a87474ebda76a204 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7150e27bc9714340a87474ebda76a204 2024-11-27T16:22:10,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7150e27bc9714340a87474ebda76a204, entries=150, sequenceid=117, filesize=11.7 K 2024-11-27T16:22:10,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/fc4dec5df0fd4169bb5d3c3b98a566e5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/fc4dec5df0fd4169bb5d3c3b98a566e5 2024-11-27T16:22:10,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/fc4dec5df0fd4169bb5d3c3b98a566e5, entries=150, sequenceid=117, filesize=11.7 K 2024-11-27T16:22:10,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/e711ee5275524fd3a4130d0eb5f7c387 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/e711ee5275524fd3a4130d0eb5f7c387 2024-11-27T16:22:10,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/e711ee5275524fd3a4130d0eb5f7c387, entries=150, sequenceid=117, filesize=11.7 K 2024-11-27T16:22:10,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for b7c24f821c64d1ed1608bef04711b574 in 922ms, sequenceid=117, 
compaction requested=false 2024-11-27T16:22:10,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:10,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-27T16:22:10,236 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:10,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-27T16:22:10,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:10,237 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:22:10,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:10,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:10,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:10,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:10,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:10,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:10,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/881f45e681c942f9b55872138dbcf7b3 is 50, key is test_row_0/A:col10/1732724529263/Put/seqid=0 2024-11-27T16:22:10,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742112_1288 (size=12001) 2024-11-27T16:22:10,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
as already flushing 2024-11-27T16:22:10,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:10,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:10,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724590416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:10,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:10,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724590416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:10,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:10,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724590417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:10,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:10,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724590521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:10,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:10,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724590522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:10,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:10,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724590522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:10,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-27T16:22:10,680 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/881f45e681c942f9b55872138dbcf7b3 2024-11-27T16:22:10,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/8f6d613ae31f48baacf7087f205a370a is 50, key is test_row_0/B:col10/1732724529263/Put/seqid=0 2024-11-27T16:22:10,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742113_1289 (size=12001) 2024-11-27T16:22:10,696 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/8f6d613ae31f48baacf7087f205a370a 2024-11-27T16:22:10,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/071b08a8b8ba4b59898ea9cf8a3263ec is 50, key is test_row_0/C:col10/1732724529263/Put/seqid=0 2024-11-27T16:22:10,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:10,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724590724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:10,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:10,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724590725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:10,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742114_1290 (size=12001) 2024-11-27T16:22:10,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:10,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724590725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:10,727 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/071b08a8b8ba4b59898ea9cf8a3263ec 2024-11-27T16:22:10,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/881f45e681c942f9b55872138dbcf7b3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/881f45e681c942f9b55872138dbcf7b3 2024-11-27T16:22:10,742 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/881f45e681c942f9b55872138dbcf7b3, entries=150, sequenceid=130, filesize=11.7 K 2024-11-27T16:22:10,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/8f6d613ae31f48baacf7087f205a370a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8f6d613ae31f48baacf7087f205a370a 2024-11-27T16:22:10,751 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8f6d613ae31f48baacf7087f205a370a, entries=150, sequenceid=130, filesize=11.7 K 2024-11-27T16:22:10,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/071b08a8b8ba4b59898ea9cf8a3263ec as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/071b08a8b8ba4b59898ea9cf8a3263ec 2024-11-27T16:22:10,759 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/071b08a8b8ba4b59898ea9cf8a3263ec, entries=150, sequenceid=130, filesize=11.7 K 2024-11-27T16:22:10,760 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b7c24f821c64d1ed1608bef04711b574 in 523ms, sequenceid=130, compaction requested=true 2024-11-27T16:22:10,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:10,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:10,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-27T16:22:10,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-27T16:22:10,764 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-27T16:22:10,764 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 834 msec 2024-11-27T16:22:10,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 839 msec 2024-11-27T16:22:11,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:11,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:22:11,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:11,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:11,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:11,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:11,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:11,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:11,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-27T16:22:11,032 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-27T16:22:11,034 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:11,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-27T16:22:11,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-27T16:22:11,035 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:11,036 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:11,036 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:11,042 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/85fd439bed2543da809075c3b7436ede is 50, key is test_row_0/A:col10/1732724530414/Put/seqid=0 2024-11-27T16:22:11,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724591039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724591041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724591043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742115_1291 (size=12151) 2024-11-27T16:22:11,075 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/85fd439bed2543da809075c3b7436ede 2024-11-27T16:22:11,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/df789d29b260439e8485b4ea2ec0f30d is 50, key is test_row_0/B:col10/1732724530414/Put/seqid=0 2024-11-27T16:22:11,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742116_1292 (size=12151) 2024-11-27T16:22:11,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/df789d29b260439e8485b4ea2ec0f30d 2024-11-27T16:22:11,124 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/aa172b1cac9e4abea1df47e4c7531553 is 50, key is test_row_0/C:col10/1732724530414/Put/seqid=0 2024-11-27T16:22:11,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742117_1293 (size=12151) 2024-11-27T16:22:11,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=156 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/aa172b1cac9e4abea1df47e4c7531553 2024-11-27T16:22:11,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-27T16:22:11,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/85fd439bed2543da809075c3b7436ede as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/85fd439bed2543da809075c3b7436ede 2024-11-27T16:22:11,144 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/85fd439bed2543da809075c3b7436ede, entries=150, sequenceid=156, filesize=11.9 K 2024-11-27T16:22:11,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/df789d29b260439e8485b4ea2ec0f30d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/df789d29b260439e8485b4ea2ec0f30d 2024-11-27T16:22:11,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724591144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724591145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724591146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,151 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/df789d29b260439e8485b4ea2ec0f30d, entries=150, sequenceid=156, filesize=11.9 K 2024-11-27T16:22:11,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/aa172b1cac9e4abea1df47e4c7531553 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/aa172b1cac9e4abea1df47e4c7531553 2024-11-27T16:22:11,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/aa172b1cac9e4abea1df47e4c7531553, entries=150, sequenceid=156, filesize=11.9 K 2024-11-27T16:22:11,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for b7c24f821c64d1ed1608bef04711b574 in 129ms, sequenceid=156, compaction requested=true 2024-11-27T16:22:11,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:11,158 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:11,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:11,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-11-27T16:22:11,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:11,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:11,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:11,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:11,159 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:11,160 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:11,160 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/A is initiating minor compaction (all files) 2024-11-27T16:22:11,160 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/A in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:11,160 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:11,161 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/B is initiating minor compaction (all files) 2024-11-27T16:22:11,161 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/0f021abf0a7a44fb92a0989baec0189c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7150e27bc9714340a87474ebda76a204, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/881f45e681c942f9b55872138dbcf7b3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/85fd439bed2543da809075c3b7436ede] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=47.2 K 2024-11-27T16:22:11,161 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/B in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:11,161 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/a070bef300bd459aa9b05a97b0138220, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/fc4dec5df0fd4169bb5d3c3b98a566e5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8f6d613ae31f48baacf7087f205a370a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/df789d29b260439e8485b4ea2ec0f30d] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=47.2 K 2024-11-27T16:22:11,161 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f021abf0a7a44fb92a0989baec0189c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732724528600 2024-11-27T16:22:11,162 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a070bef300bd459aa9b05a97b0138220, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732724528600 2024-11-27T16:22:11,162 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7150e27bc9714340a87474ebda76a204, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732724528941 2024-11-27T16:22:11,162 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting fc4dec5df0fd4169bb5d3c3b98a566e5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732724528941 2024-11-27T16:22:11,162 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 881f45e681c942f9b55872138dbcf7b3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732724529259 2024-11-27T16:22:11,162 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f6d613ae31f48baacf7087f205a370a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732724529259 2024-11-27T16:22:11,163 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85fd439bed2543da809075c3b7436ede, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732724530414 2024-11-27T16:22:11,163 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting df789d29b260439e8485b4ea2ec0f30d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732724530414 2024-11-27T16:22:11,172 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#A#compaction#243 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:11,173 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/2404d095608244e6975e9446c10e76f9 is 50, key is test_row_0/A:col10/1732724530414/Put/seqid=0 2024-11-27T16:22:11,176 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#B#compaction#244 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:11,176 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/fd391f769de44ae8a43fa5d46ac5795b is 50, key is test_row_0/B:col10/1732724530414/Put/seqid=0 2024-11-27T16:22:11,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742118_1294 (size=12493) 2024-11-27T16:22:11,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742119_1295 (size=12493) 2024-11-27T16:22:11,184 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/2404d095608244e6975e9446c10e76f9 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/2404d095608244e6975e9446c10e76f9 2024-11-27T16:22:11,188 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-27T16:22:11,189 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/A of b7c24f821c64d1ed1608bef04711b574 into 2404d095608244e6975e9446c10e76f9(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:11,189 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:11,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:11,189 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/A, priority=12, startTime=1732724531158; duration=0sec 2024-11-27T16:22:11,189 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:11,189 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:A 2024-11-27T16:22:11,189 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-27T16:22:11,189 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:11,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:11,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:11,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:11,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:11,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:11,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:11,192 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:11,192 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/C is initiating minor compaction (all files) 2024-11-27T16:22:11,192 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/C in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:11,192 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d37e6a157e1541e8805f98d659535b4f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/e711ee5275524fd3a4130d0eb5f7c387, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/071b08a8b8ba4b59898ea9cf8a3263ec, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/aa172b1cac9e4abea1df47e4c7531553] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=47.2 K 2024-11-27T16:22:11,192 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d37e6a157e1541e8805f98d659535b4f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732724528600 2024-11-27T16:22:11,193 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e711ee5275524fd3a4130d0eb5f7c387, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732724528941 2024-11-27T16:22:11,193 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 071b08a8b8ba4b59898ea9cf8a3263ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732724529259 2024-11-27T16:22:11,194 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa172b1cac9e4abea1df47e4c7531553, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732724530414 2024-11-27T16:22:11,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/f05337dfa31c4b749a621362c83259cd is 50, key is test_row_0/A:col10/1732724531041/Put/seqid=0 2024-11-27T16:22:11,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742120_1296 (size=12151) 2024-11-27T16:22:11,199 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/f05337dfa31c4b749a621362c83259cd 2024-11-27T16:22:11,204 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#C#compaction#246 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:11,205 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/41cfddc0af924381b192c213d0e0d916 is 50, key is test_row_0/C:col10/1732724530414/Put/seqid=0 2024-11-27T16:22:11,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/7c0c6d99391e48a6ad4900213b74bf0c is 50, key is test_row_0/B:col10/1732724531041/Put/seqid=0 2024-11-27T16:22:11,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742121_1297 (size=12493) 2024-11-27T16:22:11,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742122_1298 (size=12151) 2024-11-27T16:22:11,230 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/7c0c6d99391e48a6ad4900213b74bf0c 2024-11-27T16:22:11,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/bd6a3ea46fac493e9c948398025cfea5 is 50, key is test_row_0/C:col10/1732724531041/Put/seqid=0 2024-11-27T16:22:11,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742123_1299 (size=12151) 2024-11-27T16:22:11,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-27T16:22:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:11,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:11,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724591381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724591381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724591382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724591485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724591486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724591488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,587 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/fd391f769de44ae8a43fa5d46ac5795b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/fd391f769de44ae8a43fa5d46ac5795b 2024-11-27T16:22:11,592 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/B of b7c24f821c64d1ed1608bef04711b574 into fd391f769de44ae8a43fa5d46ac5795b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:11,592 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:11,592 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/B, priority=12, startTime=1732724531159; duration=0sec 2024-11-27T16:22:11,592 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:11,592 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:B 2024-11-27T16:22:11,619 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/41cfddc0af924381b192c213d0e0d916 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/41cfddc0af924381b192c213d0e0d916 2024-11-27T16:22:11,624 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/C of b7c24f821c64d1ed1608bef04711b574 into 41cfddc0af924381b192c213d0e0d916(size=12.2 K), total size for store is 12.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:11,624 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:11,624 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/C, priority=12, startTime=1732724531159; duration=0sec 2024-11-27T16:22:11,624 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:11,624 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:C 2024-11-27T16:22:11,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-27T16:22:11,663 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/bd6a3ea46fac493e9c948398025cfea5 2024-11-27T16:22:11,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/f05337dfa31c4b749a621362c83259cd as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/f05337dfa31c4b749a621362c83259cd 2024-11-27T16:22:11,672 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/f05337dfa31c4b749a621362c83259cd, entries=150, sequenceid=167, filesize=11.9 K 2024-11-27T16:22:11,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/7c0c6d99391e48a6ad4900213b74bf0c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/7c0c6d99391e48a6ad4900213b74bf0c 2024-11-27T16:22:11,677 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/7c0c6d99391e48a6ad4900213b74bf0c, entries=150, sequenceid=167, filesize=11.9 K 2024-11-27T16:22:11,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/bd6a3ea46fac493e9c948398025cfea5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/bd6a3ea46fac493e9c948398025cfea5 2024-11-27T16:22:11,683 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/bd6a3ea46fac493e9c948398025cfea5, entries=150, sequenceid=167, filesize=11.9 K 2024-11-27T16:22:11,684 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for b7c24f821c64d1ed1608bef04711b574 in 495ms, sequenceid=167, compaction requested=false 2024-11-27T16:22:11,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:11,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:11,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-27T16:22:11,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-27T16:22:11,687 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-27T16:22:11,687 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 650 msec 2024-11-27T16:22:11,689 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 654 msec 2024-11-27T16:22:11,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:11,691 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-27T16:22:11,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:11,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:11,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:11,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:11,693 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:11,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:11,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/9662442ba947476f9ccd1d3e7d4d8d6e is 50, key is test_row_0/A:col10/1732724531381/Put/seqid=0 2024-11-27T16:22:11,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724591700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724591701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724591701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742124_1300 (size=12151) 2024-11-27T16:22:11,736 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/9662442ba947476f9ccd1d3e7d4d8d6e 2024-11-27T16:22:11,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/6325f3e0cbba4917a3990e2bc158db33 is 50, key is test_row_0/B:col10/1732724531381/Put/seqid=0 2024-11-27T16:22:11,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742125_1301 (size=12151) 2024-11-27T16:22:11,751 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/6325f3e0cbba4917a3990e2bc158db33 2024-11-27T16:22:11,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/751f5f0011764921a3f4ef01ebbb52b3 is 50, key is test_row_0/C:col10/1732724531381/Put/seqid=0 2024-11-27T16:22:11,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742126_1302 (size=12151) 2024-11-27T16:22:11,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/751f5f0011764921a3f4ef01ebbb52b3 2024-11-27T16:22:11,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/9662442ba947476f9ccd1d3e7d4d8d6e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/9662442ba947476f9ccd1d3e7d4d8d6e 2024-11-27T16:22:11,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/9662442ba947476f9ccd1d3e7d4d8d6e, entries=150, sequenceid=197, filesize=11.9 K 2024-11-27T16:22:11,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/6325f3e0cbba4917a3990e2bc158db33 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/6325f3e0cbba4917a3990e2bc158db33 2024-11-27T16:22:11,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724591803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724591804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:11,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724591805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:11,807 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/6325f3e0cbba4917a3990e2bc158db33, entries=150, sequenceid=197, filesize=11.9 K 2024-11-27T16:22:11,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/751f5f0011764921a3f4ef01ebbb52b3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/751f5f0011764921a3f4ef01ebbb52b3 2024-11-27T16:22:11,816 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/751f5f0011764921a3f4ef01ebbb52b3, entries=150, sequenceid=197, filesize=11.9 K 2024-11-27T16:22:11,816 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for b7c24f821c64d1ed1608bef04711b574 in 125ms, sequenceid=197, compaction requested=true 2024-11-27T16:22:11,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:11,817 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:11,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:11,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:11,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:11,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), 
splitQueue=0 2024-11-27T16:22:11,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:11,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:22:11,817 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:11,818 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:11,818 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/A is initiating minor compaction (all files) 2024-11-27T16:22:11,818 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/A in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:11,818 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/2404d095608244e6975e9446c10e76f9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/f05337dfa31c4b749a621362c83259cd, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/9662442ba947476f9ccd1d3e7d4d8d6e] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=35.9 K 2024-11-27T16:22:11,818 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:11,818 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/B is initiating minor compaction (all files) 2024-11-27T16:22:11,818 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/B in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:11,819 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/fd391f769de44ae8a43fa5d46ac5795b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/7c0c6d99391e48a6ad4900213b74bf0c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/6325f3e0cbba4917a3990e2bc158db33] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=35.9 K 2024-11-27T16:22:11,819 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2404d095608244e6975e9446c10e76f9, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732724530414 2024-11-27T16:22:11,819 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd391f769de44ae8a43fa5d46ac5795b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732724530414 2024-11-27T16:22:11,819 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f05337dfa31c4b749a621362c83259cd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732724531038 2024-11-27T16:22:11,819 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 9662442ba947476f9ccd1d3e7d4d8d6e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732724531381 2024-11-27T16:22:11,819 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c0c6d99391e48a6ad4900213b74bf0c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732724531038 2024-11-27T16:22:11,820 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6325f3e0cbba4917a3990e2bc158db33, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732724531381 2024-11-27T16:22:11,828 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#A#compaction#252 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:11,829 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/7c2aeb01bbaf4b00b63ae6f14903cc0f is 50, key is test_row_0/A:col10/1732724531381/Put/seqid=0 2024-11-27T16:22:11,829 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#B#compaction#253 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:11,830 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/2fbffab0024645f5a8f3a87b4a9fd36d is 50, key is test_row_0/B:col10/1732724531381/Put/seqid=0 2024-11-27T16:22:11,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742127_1303 (size=12595) 2024-11-27T16:22:11,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742128_1304 (size=12595) 2024-11-27T16:22:11,866 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/2fbffab0024645f5a8f3a87b4a9fd36d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/2fbffab0024645f5a8f3a87b4a9fd36d 2024-11-27T16:22:11,871 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/B of b7c24f821c64d1ed1608bef04711b574 into 2fbffab0024645f5a8f3a87b4a9fd36d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:11,872 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:11,872 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/B, priority=13, startTime=1732724531817; duration=0sec 2024-11-27T16:22:11,872 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:11,872 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:B 2024-11-27T16:22:11,872 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:11,873 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:11,873 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/C is initiating minor compaction (all files) 2024-11-27T16:22:11,873 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/C in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:11,873 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/41cfddc0af924381b192c213d0e0d916, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/bd6a3ea46fac493e9c948398025cfea5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/751f5f0011764921a3f4ef01ebbb52b3] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=35.9 K 2024-11-27T16:22:11,875 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41cfddc0af924381b192c213d0e0d916, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732724530414 2024-11-27T16:22:11,876 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd6a3ea46fac493e9c948398025cfea5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732724531038 2024-11-27T16:22:11,876 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 751f5f0011764921a3f4ef01ebbb52b3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732724531381 2024-11-27T16:22:11,885 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#C#compaction#254 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:11,887 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/7e5b17435a3d40f0ab8403eaf4dcc9d7 is 50, key is test_row_0/C:col10/1732724531381/Put/seqid=0 2024-11-27T16:22:11,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742129_1305 (size=12595) 2024-11-27T16:22:11,911 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/7e5b17435a3d40f0ab8403eaf4dcc9d7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/7e5b17435a3d40f0ab8403eaf4dcc9d7 2024-11-27T16:22:11,919 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/C of b7c24f821c64d1ed1608bef04711b574 into 7e5b17435a3d40f0ab8403eaf4dcc9d7(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:22:11,919 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:11,919 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/C, priority=13, startTime=1732724531817; duration=0sec 2024-11-27T16:22:11,919 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:11,919 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:C 2024-11-27T16:22:12,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:12,012 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:22:12,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:12,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:12,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:12,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:12,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:12,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:12,021 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/92e5e329c1cb4ea4abe6350fb58a1697 is 50, key is test_row_0/A:col10/1732724531696/Put/seqid=0 2024-11-27T16:22:12,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742130_1306 (size=12151) 2024-11-27T16:22:12,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:12,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:12,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724592057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724592057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:12,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724592057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-27T16:22:12,139 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-27T16:22:12,141 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:12,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-27T16:22:12,143 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:12,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-27T16:22:12,143 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:12,144 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:12,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:12,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724592164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:12,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724592164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:12,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724592169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,244 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/7c2aeb01bbaf4b00b63ae6f14903cc0f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7c2aeb01bbaf4b00b63ae6f14903cc0f 2024-11-27T16:22:12,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-27T16:22:12,248 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/A of b7c24f821c64d1ed1608bef04711b574 into 7c2aeb01bbaf4b00b63ae6f14903cc0f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:22:12,248 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:12,248 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/A, priority=13, startTime=1732724531817; duration=0sec 2024-11-27T16:22:12,248 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:12,248 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:A 2024-11-27T16:22:12,295 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,296 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T16:22:12,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:12,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:12,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:12,296 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:12,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:12,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:12,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:12,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724592367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:12,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724592367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:12,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724592373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/92e5e329c1cb4ea4abe6350fb58a1697 2024-11-27T16:22:12,439 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/80e761e651f943cfa3e7f5c35e76dc65 is 50, key is test_row_0/B:col10/1732724531696/Put/seqid=0 2024-11-27T16:22:12,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742131_1307 (size=12151) 2024-11-27T16:22:12,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-27T16:22:12,448 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T16:22:12,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:12,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:12,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:12,449 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:12,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:12,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:12,601 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T16:22:12,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:12,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:12,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:12,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:12,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:12,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:12,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:12,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724592669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:12,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724592670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:12,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724592678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-27T16:22:12,754 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T16:22:12,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:12,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:12,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:12,755 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:12,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:12,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:12,844 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/80e761e651f943cfa3e7f5c35e76dc65 2024-11-27T16:22:12,852 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/01c87d7424da4e7daf0c91810cd6307f is 50, key is test_row_0/C:col10/1732724531696/Put/seqid=0 2024-11-27T16:22:12,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742132_1308 (size=12151) 2024-11-27T16:22:12,907 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:12,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T16:22:12,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:12,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:12,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:12,908 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:12,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:12,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:13,060 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:13,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T16:22:13,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:13,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:13,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:13,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:13,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:13,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:13,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724593173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:13,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724593176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:13,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724593184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:13,214 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:13,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T16:22:13,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:13,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:13,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:13,215 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:13,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:13,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-27T16:22:13,261 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/01c87d7424da4e7daf0c91810cd6307f 2024-11-27T16:22:13,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/92e5e329c1cb4ea4abe6350fb58a1697 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/92e5e329c1cb4ea4abe6350fb58a1697 2024-11-27T16:22:13,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/92e5e329c1cb4ea4abe6350fb58a1697, entries=150, sequenceid=210, filesize=11.9 K 2024-11-27T16:22:13,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/80e761e651f943cfa3e7f5c35e76dc65 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/80e761e651f943cfa3e7f5c35e76dc65 2024-11-27T16:22:13,287 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/80e761e651f943cfa3e7f5c35e76dc65, entries=150, sequenceid=210, filesize=11.9 K 2024-11-27T16:22:13,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/01c87d7424da4e7daf0c91810cd6307f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/01c87d7424da4e7daf0c91810cd6307f 2024-11-27T16:22:13,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/01c87d7424da4e7daf0c91810cd6307f, entries=150, sequenceid=210, filesize=11.9 K 2024-11-27T16:22:13,298 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for b7c24f821c64d1ed1608bef04711b574 in 1286ms, sequenceid=210, compaction requested=false 2024-11-27T16:22:13,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:13,367 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:13,369 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T16:22:13,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:13,370 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-27T16:22:13,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:13,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:13,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:13,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:13,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:13,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:13,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/a956ea6c672740d281884e6d08b82460 is 50, key is test_row_0/A:col10/1732724532055/Put/seqid=0 2024-11-27T16:22:13,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742133_1309 (size=12151) 2024-11-27T16:22:13,403 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/a956ea6c672740d281884e6d08b82460 2024-11-27T16:22:13,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/14118868d9e74bd7979191fbbcf5e13d is 50, key is test_row_0/B:col10/1732724532055/Put/seqid=0 2024-11-27T16:22:13,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46329 is added to blk_1073742134_1310 (size=12151) 2024-11-27T16:22:13,832 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/14118868d9e74bd7979191fbbcf5e13d 2024-11-27T16:22:13,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/9956edc1acd844efaee5198751249934 is 50, key is test_row_0/C:col10/1732724532055/Put/seqid=0 2024-11-27T16:22:13,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742135_1311 (size=12151) 2024-11-27T16:22:13,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:13,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:13,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:13,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724593919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:13,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:13,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54232 deadline: 1732724593942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:13,944 DEBUG [Thread-1207 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8264 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:22:14,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:14,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724594023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:14,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:14,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724594182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:14,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:14,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724594190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:14,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:14,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724594190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:14,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:14,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724594225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:14,246 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/9956edc1acd844efaee5198751249934 2024-11-27T16:22:14,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-27T16:22:14,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/a956ea6c672740d281884e6d08b82460 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a956ea6c672740d281884e6d08b82460 2024-11-27T16:22:14,254 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a956ea6c672740d281884e6d08b82460, entries=150, sequenceid=238, filesize=11.9 K 2024-11-27T16:22:14,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/14118868d9e74bd7979191fbbcf5e13d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/14118868d9e74bd7979191fbbcf5e13d 2024-11-27T16:22:14,269 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/14118868d9e74bd7979191fbbcf5e13d, entries=150, sequenceid=238, filesize=11.9 K 2024-11-27T16:22:14,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/9956edc1acd844efaee5198751249934 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9956edc1acd844efaee5198751249934 2024-11-27T16:22:14,274 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9956edc1acd844efaee5198751249934, entries=150, sequenceid=238, filesize=11.9 K 2024-11-27T16:22:14,275 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for b7c24f821c64d1ed1608bef04711b574 in 905ms, sequenceid=238, compaction requested=true 2024-11-27T16:22:14,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:14,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:14,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-27T16:22:14,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-27T16:22:14,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-27T16:22:14,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1320 sec 2024-11-27T16:22:14,279 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 2.1370 sec 2024-11-27T16:22:14,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:22:14,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:14,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:14,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:14,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:14,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:14,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-27T16:22:14,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:14,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/6395bc582d4d4f5187e4b66de81df8d3 is 50, key is test_row_0/A:col10/1732724534529/Put/seqid=0 2024-11-27T16:22:14,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742136_1312 (size=12151) 2024-11-27T16:22:14,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/6395bc582d4d4f5187e4b66de81df8d3 2024-11-27T16:22:14,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/bad35449d9ba4f3abf1114348c433565 is 50, key is test_row_0/B:col10/1732724534529/Put/seqid=0 2024-11-27T16:22:14,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742137_1313 (size=12151) 2024-11-27T16:22:14,645 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/bad35449d9ba4f3abf1114348c433565 2024-11-27T16:22:14,663 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/9063047429b1471685b053e906a4ab46 is 50, key is test_row_0/C:col10/1732724534529/Put/seqid=0 2024-11-27T16:22:14,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742138_1314 (size=12151) 2024-11-27T16:22:14,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/9063047429b1471685b053e906a4ab46 2024-11-27T16:22:14,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/6395bc582d4d4f5187e4b66de81df8d3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/6395bc582d4d4f5187e4b66de81df8d3 2024-11-27T16:22:14,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/6395bc582d4d4f5187e4b66de81df8d3, entries=150, sequenceid=249, filesize=11.9 K 2024-11-27T16:22:14,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/bad35449d9ba4f3abf1114348c433565 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/bad35449d9ba4f3abf1114348c433565 2024-11-27T16:22:14,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/bad35449d9ba4f3abf1114348c433565, entries=150, sequenceid=249, filesize=11.9 K 2024-11-27T16:22:14,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/9063047429b1471685b053e906a4ab46 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9063047429b1471685b053e906a4ab46 2024-11-27T16:22:14,731 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9063047429b1471685b053e906a4ab46, entries=150, sequenceid=249, filesize=11.9 K 2024-11-27T16:22:14,732 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b7c24f821c64d1ed1608bef04711b574 in 198ms, sequenceid=249, compaction requested=true 2024-11-27T16:22:14,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:14,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:14,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:14,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:14,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:22:14,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:14,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 
2024-11-27T16:22:14,733 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:14,733 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:14,739 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:14,739 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/C is initiating minor compaction (all files) 2024-11-27T16:22:14,739 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/C in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:14,739 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/7e5b17435a3d40f0ab8403eaf4dcc9d7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/01c87d7424da4e7daf0c91810cd6307f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9956edc1acd844efaee5198751249934, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9063047429b1471685b053e906a4ab46] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=47.9 K 2024-11-27T16:22:14,739 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:14,739 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/A is initiating minor compaction (all files) 2024-11-27T16:22:14,739 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/A in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:14,740 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7c2aeb01bbaf4b00b63ae6f14903cc0f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/92e5e329c1cb4ea4abe6350fb58a1697, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a956ea6c672740d281884e6d08b82460, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/6395bc582d4d4f5187e4b66de81df8d3] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=47.9 K 2024-11-27T16:22:14,740 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c2aeb01bbaf4b00b63ae6f14903cc0f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732724531381 2024-11-27T16:22:14,741 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e5b17435a3d40f0ab8403eaf4dcc9d7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732724531381 2024-11-27T16:22:14,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:14,741 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 92e5e329c1cb4ea4abe6350fb58a1697, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732724531693 2024-11-27T16:22:14,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-27T16:22:14,741 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01c87d7424da4e7daf0c91810cd6307f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732724531693 2024-11-27T16:22:14,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:14,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:14,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:14,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:14,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:14,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:14,742 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9956edc1acd844efaee5198751249934, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=238, earliestPutTs=1732724532046 2024-11-27T16:22:14,742 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a956ea6c672740d281884e6d08b82460, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732724532046 2024-11-27T16:22:14,743 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6395bc582d4d4f5187e4b66de81df8d3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724533915 2024-11-27T16:22:14,743 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9063047429b1471685b053e906a4ab46, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724533915 2024-11-27T16:22:14,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/552f8d64d15845dd933caf9b9e9b68e1 is 50, key is test_row_0/A:col10/1732724534715/Put/seqid=0 2024-11-27T16:22:14,773 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#C#compaction#265 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:14,774 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/f469cec1cc5847ccb07c92db438c5be3 is 50, key is test_row_0/C:col10/1732724534529/Put/seqid=0 2024-11-27T16:22:14,776 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#A#compaction#266 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:14,777 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/026f83a2ada548c39ad61db95f5cde2c is 50, key is test_row_0/A:col10/1732724534529/Put/seqid=0 2024-11-27T16:22:14,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742140_1316 (size=12731) 2024-11-27T16:22:14,823 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/f469cec1cc5847ccb07c92db438c5be3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f469cec1cc5847ccb07c92db438c5be3 2024-11-27T16:22:14,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:14,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724594822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:14,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742139_1315 (size=14741) 2024-11-27T16:22:14,835 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/C of b7c24f821c64d1ed1608bef04711b574 into f469cec1cc5847ccb07c92db438c5be3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:22:14,835 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:14,835 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/C, priority=12, startTime=1732724534732; duration=0sec 2024-11-27T16:22:14,835 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:14,835 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:C 2024-11-27T16:22:14,835 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:14,836 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:14,836 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/B is initiating minor compaction (all files) 2024-11-27T16:22:14,836 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/B in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:14,836 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/2fbffab0024645f5a8f3a87b4a9fd36d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/80e761e651f943cfa3e7f5c35e76dc65, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/14118868d9e74bd7979191fbbcf5e13d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/bad35449d9ba4f3abf1114348c433565] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=47.9 K 2024-11-27T16:22:14,837 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2fbffab0024645f5a8f3a87b4a9fd36d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732724531381 2024-11-27T16:22:14,837 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80e761e651f943cfa3e7f5c35e76dc65, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732724531693 2024-11-27T16:22:14,838 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14118868d9e74bd7979191fbbcf5e13d, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732724532046 2024-11-27T16:22:14,838 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting bad35449d9ba4f3abf1114348c433565, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724533915 2024-11-27T16:22:14,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742141_1317 (size=12731) 2024-11-27T16:22:14,872 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/026f83a2ada548c39ad61db95f5cde2c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/026f83a2ada548c39ad61db95f5cde2c 2024-11-27T16:22:14,876 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#B#compaction#267 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:14,877 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/b0429a334b4a44e4bd70d9c3444c4588 is 50, key is test_row_0/B:col10/1732724534529/Put/seqid=0 2024-11-27T16:22:14,883 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/A of b7c24f821c64d1ed1608bef04711b574 into 026f83a2ada548c39ad61db95f5cde2c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:14,883 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:14,884 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/A, priority=12, startTime=1732724534732; duration=0sec 2024-11-27T16:22:14,884 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:14,884 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:A 2024-11-27T16:22:14,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:14,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724594928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:14,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742142_1318 (size=12731) 2024-11-27T16:22:14,973 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/b0429a334b4a44e4bd70d9c3444c4588 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/b0429a334b4a44e4bd70d9c3444c4588 2024-11-27T16:22:14,978 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/B of b7c24f821c64d1ed1608bef04711b574 into b0429a334b4a44e4bd70d9c3444c4588(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:14,978 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:14,978 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/B, priority=12, startTime=1732724534732; duration=0sec 2024-11-27T16:22:14,978 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:14,978 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:B 2024-11-27T16:22:15,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:15,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724595134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:15,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/552f8d64d15845dd933caf9b9e9b68e1 2024-11-27T16:22:15,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/46297d77e113467eadbb80541de96968 is 50, key is test_row_0/B:col10/1732724534715/Put/seqid=0 2024-11-27T16:22:15,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742143_1319 (size=12301) 2024-11-27T16:22:15,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/46297d77e113467eadbb80541de96968 2024-11-27T16:22:15,321 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/f476703c58d542819a907410c009ab56 is 50, key is test_row_0/C:col10/1732724534715/Put/seqid=0 2024-11-27T16:22:15,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742144_1320 (size=12301) 2024-11-27T16:22:15,445 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:15,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724595439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:15,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/f476703c58d542819a907410c009ab56 2024-11-27T16:22:15,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/552f8d64d15845dd933caf9b9e9b68e1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/552f8d64d15845dd933caf9b9e9b68e1 2024-11-27T16:22:15,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/552f8d64d15845dd933caf9b9e9b68e1, entries=200, sequenceid=275, filesize=14.4 K 2024-11-27T16:22:15,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/46297d77e113467eadbb80541de96968 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/46297d77e113467eadbb80541de96968 2024-11-27T16:22:15,790 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/46297d77e113467eadbb80541de96968, entries=150, sequenceid=275, filesize=12.0 K 2024-11-27T16:22:15,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/f476703c58d542819a907410c009ab56 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f476703c58d542819a907410c009ab56 2024-11-27T16:22:15,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f476703c58d542819a907410c009ab56, entries=150, sequenceid=275, filesize=12.0 K 2024-11-27T16:22:15,802 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for b7c24f821c64d1ed1608bef04711b574 in 1061ms, sequenceid=275, compaction requested=false 2024-11-27T16:22:15,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:15,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:22:15,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:15,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:15,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:15,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:15,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:15,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:15,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:15,971 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/4f176e56bdce40cabcc32557bddd8d5b is 50, key is test_row_0/A:col10/1732724534819/Put/seqid=0 2024-11-27T16:22:16,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742145_1321 (size=14741) 2024-11-27T16:22:16,050 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/4f176e56bdce40cabcc32557bddd8d5b 2024-11-27T16:22:16,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/25595b2fdf7c4e89940fefd5c454ab68 is 50, key is test_row_0/B:col10/1732724534819/Put/seqid=0 2024-11-27T16:22:16,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742146_1322 (size=12301) 2024-11-27T16:22:16,130 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:16,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724596125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:16,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:16,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724596199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:16,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:16,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724596199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:16,201 DEBUG [Thread-1211 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:22:16,201 DEBUG [Thread-1209 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:22:16,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:16,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724596203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:16,205 DEBUG [Thread-1203 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:22:16,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:16,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724596231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:16,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-27T16:22:16,249 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-27T16:22:16,250 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:16,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-27T16:22:16,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-27T16:22:16,252 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:16,253 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:16,253 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:16,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-27T16:22:16,405 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:16,405 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-27T16:22:16,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:16,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:16,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:16,406 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:16,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:16,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:16,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:16,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724596436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:16,523 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/25595b2fdf7c4e89940fefd5c454ab68 2024-11-27T16:22:16,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-27T16:22:16,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/338476f0040b40398c950b3f9fd2aecc is 50, key is test_row_0/C:col10/1732724534819/Put/seqid=0 2024-11-27T16:22:16,558 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:16,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-27T16:22:16,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:16,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:16,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:16,559 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:16,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:16,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:16,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742147_1323 (size=12301) 2024-11-27T16:22:16,594 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/338476f0040b40398c950b3f9fd2aecc 2024-11-27T16:22:16,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/4f176e56bdce40cabcc32557bddd8d5b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4f176e56bdce40cabcc32557bddd8d5b 2024-11-27T16:22:16,605 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4f176e56bdce40cabcc32557bddd8d5b, entries=200, sequenceid=289, filesize=14.4 K 2024-11-27T16:22:16,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/25595b2fdf7c4e89940fefd5c454ab68 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/25595b2fdf7c4e89940fefd5c454ab68 2024-11-27T16:22:16,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/25595b2fdf7c4e89940fefd5c454ab68, entries=150, sequenceid=289, filesize=12.0 K 2024-11-27T16:22:16,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/338476f0040b40398c950b3f9fd2aecc as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/338476f0040b40398c950b3f9fd2aecc 2024-11-27T16:22:16,619 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/338476f0040b40398c950b3f9fd2aecc, entries=150, sequenceid=289, filesize=12.0 K 2024-11-27T16:22:16,620 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b7c24f821c64d1ed1608bef04711b574 in 660ms, sequenceid=289, compaction requested=true 2024-11-27T16:22:16,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:16,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:16,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:16,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:16,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:16,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:16,621 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:22:16,621 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:16,621 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:16,622 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:16,622 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/B is initiating minor 
compaction (all files) 2024-11-27T16:22:16,622 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/B in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:16,622 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/b0429a334b4a44e4bd70d9c3444c4588, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/46297d77e113467eadbb80541de96968, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/25595b2fdf7c4e89940fefd5c454ab68] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=36.5 K 2024-11-27T16:22:16,623 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0429a334b4a44e4bd70d9c3444c4588, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724533915 2024-11-27T16:22:16,623 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42213 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:16,623 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/A is initiating minor compaction (all files) 2024-11-27T16:22:16,623 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/A in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
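[Editor's illustrative note] The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries above refer to the ratio test the exploring policy applies to a candidate set: as I understand it, a selection is "in ratio" only if no file is larger than the configured ratio times the combined size of the other files in the set. The sketch below is not HBase's code; class, method, and the sample sizes (taken loosely from the three B-store files above, roughly 12.4 K + 12.0 K + 12.0 K) are illustrative, and 1.2 is only the commonly cited default ratio.

    import java.util.List;

    // Illustrative sketch of the "in ratio" check suggested by the log entries above.
    public final class RatioCheckSketch {
        // A selection is "in ratio" if no file exceeds ratio * (sum of the other files).
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = 0L;
            for (long size : fileSizes) {
                total += size;
            }
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Roughly the three B-store file sizes selected above (~36.5 K total).
            List<Long> sizes = List.of(12_700L, 12_300L, 12_300L);
            System.out.println(filesInRatio(sizes, 1.2)); // true: every file <= 1.2 * sum of the others
        }
    }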
2024-11-27T16:22:16,623 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/026f83a2ada548c39ad61db95f5cde2c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/552f8d64d15845dd933caf9b9e9b68e1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4f176e56bdce40cabcc32557bddd8d5b] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=41.2 K 2024-11-27T16:22:16,624 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46297d77e113467eadbb80541de96968, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732724534683 2024-11-27T16:22:16,624 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 026f83a2ada548c39ad61db95f5cde2c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724533915 2024-11-27T16:22:16,624 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25595b2fdf7c4e89940fefd5c454ab68, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732724534790 2024-11-27T16:22:16,625 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 552f8d64d15845dd933caf9b9e9b68e1, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732724534683 2024-11-27T16:22:16,625 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f176e56bdce40cabcc32557bddd8d5b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732724534773 2024-11-27T16:22:16,642 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#A#compaction#273 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:16,642 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/a5f11e05584945f99ca769344b553a5c is 50, key is test_row_0/A:col10/1732724534819/Put/seqid=0 2024-11-27T16:22:16,645 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#B#compaction#274 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:16,646 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/c31bc6079c1942719f5173c9559b727d is 50, key is test_row_0/B:col10/1732724534819/Put/seqid=0 2024-11-27T16:22:16,712 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:16,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-27T16:22:16,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:16,713 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:22:16,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:16,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:16,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:16,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:16,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:16,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:16,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742148_1324 (size=12983) 2024-11-27T16:22:16,722 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/c31bc6079c1942719f5173c9559b727d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/c31bc6079c1942719f5173c9559b727d 2024-11-27T16:22:16,730 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/B of b7c24f821c64d1ed1608bef04711b574 into c31bc6079c1942719f5173c9559b727d(size=12.7 K), 
total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:16,730 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:16,731 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/B, priority=13, startTime=1732724536620; duration=0sec 2024-11-27T16:22:16,731 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:16,731 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:B 2024-11-27T16:22:16,731 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:16,732 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:16,732 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/C is initiating minor compaction (all files) 2024-11-27T16:22:16,732 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/C in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:16,732 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f469cec1cc5847ccb07c92db438c5be3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f476703c58d542819a907410c009ab56, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/338476f0040b40398c950b3f9fd2aecc] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=36.5 K 2024-11-27T16:22:16,733 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f469cec1cc5847ccb07c92db438c5be3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724533915 2024-11-27T16:22:16,733 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f476703c58d542819a907410c009ab56, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732724534683 2024-11-27T16:22:16,734 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 338476f0040b40398c950b3f9fd2aecc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732724534790 2024-11-27T16:22:16,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742149_1325 (size=12983) 2024-11-27T16:22:16,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/818ff8c7ba41408e883ccbe7f9e7d6c7 is 50, key is test_row_0/A:col10/1732724536058/Put/seqid=0 2024-11-27T16:22:16,746 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/a5f11e05584945f99ca769344b553a5c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a5f11e05584945f99ca769344b553a5c 2024-11-27T16:22:16,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:16,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:16,752 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/A of b7c24f821c64d1ed1608bef04711b574 into a5f11e05584945f99ca769344b553a5c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
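[Editor's illustrative note] The PressureAwareThroughputController entries above report each compaction's average throughput against a total limit (53.85 MB/second here), which the controller derives from configured lower and upper bounds. If one wanted to cap compaction throughput in a similar setup, a minimal sketch follows; the property names are the ones documented for HBase 2.x's pressure-aware compaction throughput controller and the byte values are illustrative, so treat both as assumptions for this particular build.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Minimal sketch: bounding compaction throughput via configuration (assumed property names).
    public class CompactionThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Bounds between which the pressure-aware controller interpolates its limit.
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            System.out.println("lower=" + conf.get("hbase.hstore.compaction.throughput.lower.bound")
                + " higher=" + conf.get("hbase.hstore.compaction.throughput.higher.bound"));
        }
    }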
2024-11-27T16:22:16,752 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:16,752 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/A, priority=13, startTime=1732724536620; duration=0sec 2024-11-27T16:22:16,752 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:16,752 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:A 2024-11-27T16:22:16,754 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#C#compaction#276 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:16,755 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/7149b2f90a3c483c8901bb68c6790d4d is 50, key is test_row_0/C:col10/1732724534819/Put/seqid=0 2024-11-27T16:22:16,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742151_1327 (size=12983) 2024-11-27T16:22:16,809 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/7149b2f90a3c483c8901bb68c6790d4d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/7149b2f90a3c483c8901bb68c6790d4d 2024-11-27T16:22:16,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742150_1326 (size=12301) 2024-11-27T16:22:16,812 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/818ff8c7ba41408e883ccbe7f9e7d6c7 2024-11-27T16:22:16,814 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/C of b7c24f821c64d1ed1608bef04711b574 into 7149b2f90a3c483c8901bb68c6790d4d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
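[Editor's illustrative note] The repeated "Committing .../.tmp/<family>/<file> as .../<family>/<file>" entries above reflect the write-to-temp-then-rename commit pattern: flush and compaction output is written under the region's .tmp directory and only moved into the column-family directory once complete. A generic sketch of that pattern with the Hadoop FileSystem API; the paths are placeholders, not the ones from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Generic write-then-rename commit sketch; illustrative paths only.
    public class TmpCommitSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            Path tmpFile = new Path("/data/default/SomeTable/region/.tmp/A/newfile");
            Path committed = new Path("/data/default/SomeTable/region/A/newfile");
            // The file is fully written under .tmp first (omitted here), then made visible
            // by renaming it into the store directory.
            if (!fs.rename(tmpFile, committed)) {
                throw new java.io.IOException("Failed to commit " + tmpFile + " to " + committed);
            }
        }
    }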
2024-11-27T16:22:16,814 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:16,814 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/C, priority=13, startTime=1732724536620; duration=0sec 2024-11-27T16:22:16,814 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:16,814 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:C 2024-11-27T16:22:16,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/ec7c64ad8f6c4d3da9eed1374f97d442 is 50, key is test_row_0/B:col10/1732724536058/Put/seqid=0 2024-11-27T16:22:16,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:16,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724596851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:16,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-27T16:22:16,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742152_1328 (size=12301) 2024-11-27T16:22:16,890 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/ec7c64ad8f6c4d3da9eed1374f97d442 2024-11-27T16:22:16,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/2ba46f7e8c9f49779557155e27ac9909 is 50, key is test_row_0/C:col10/1732724536058/Put/seqid=0 2024-11-27T16:22:16,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742153_1329 (size=12301) 2024-11-27T16:22:16,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:16,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724596956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:17,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:17,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724597163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:17,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-27T16:22:17,358 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/2ba46f7e8c9f49779557155e27ac9909 2024-11-27T16:22:17,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/818ff8c7ba41408e883ccbe7f9e7d6c7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/818ff8c7ba41408e883ccbe7f9e7d6c7 2024-11-27T16:22:17,373 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/818ff8c7ba41408e883ccbe7f9e7d6c7, entries=150, sequenceid=314, filesize=12.0 K 2024-11-27T16:22:17,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/ec7c64ad8f6c4d3da9eed1374f97d442 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/ec7c64ad8f6c4d3da9eed1374f97d442 2024-11-27T16:22:17,378 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/ec7c64ad8f6c4d3da9eed1374f97d442, entries=150, sequenceid=314, filesize=12.0 K 2024-11-27T16:22:17,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/2ba46f7e8c9f49779557155e27ac9909 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/2ba46f7e8c9f49779557155e27ac9909 2024-11-27T16:22:17,386 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/2ba46f7e8c9f49779557155e27ac9909, entries=150, sequenceid=314, filesize=12.0 K 2024-11-27T16:22:17,387 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b7c24f821c64d1ed1608bef04711b574 in 674ms, sequenceid=314, compaction requested=false 2024-11-27T16:22:17,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:17,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:17,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-27T16:22:17,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-27T16:22:17,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-27T16:22:17,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1370 sec 2024-11-27T16:22:17,399 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.1440 sec 2024-11-27T16:22:17,475 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:22:17,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:17,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:17,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:17,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:17,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:17,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-11-27T16:22:17,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:17,491 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/7740bdb667df42ab9455c3297b6f0d2b is 50, key is test_row_0/A:col10/1732724536849/Put/seqid=0 2024-11-27T16:22:17,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742154_1330 (size=12301) 2024-11-27T16:22:17,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:17,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724597643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:17,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:17,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724597748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:17,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/7740bdb667df42ab9455c3297b6f0d2b 2024-11-27T16:22:17,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:17,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724597953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:17,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/122852e5075045b48f4c7e2f91bc0f09 is 50, key is test_row_0/B:col10/1732724536849/Put/seqid=0 2024-11-27T16:22:18,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742155_1331 (size=12301) 2024-11-27T16:22:18,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:18,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724598259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:18,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-27T16:22:18,357 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-27T16:22:18,359 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:18,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-27T16:22:18,362 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:18,362 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:18,363 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:18,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-27T16:22:18,412 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/122852e5075045b48f4c7e2f91bc0f09 2024-11-27T16:22:18,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/fb6bce94783b4b69a643a8087cc906d5 is 50, key is test_row_0/C:col10/1732724536849/Put/seqid=0 2024-11-27T16:22:18,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-27T16:22:18,489 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742156_1332 (size=12301) 2024-11-27T16:22:18,517 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:18,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-27T16:22:18,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:18,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:18,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:18,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:18,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:18,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:18,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-27T16:22:18,678 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:18,679 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-27T16:22:18,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
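[Editor's illustrative note] The flush of TestAcidGuarantees requested by the test client above (Operation: FLUSH, procId 77 completed, then a new FlushTableProcedure stored as pid=79) is the kind of call issued through the public Admin API. A minimal sketch, assuming a standard client Connection; connection details are placeholders.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Minimal sketch: asking the master to flush a table, as the test client appears to do.
    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // In this build the master runs a FlushTableProcedure and dispatches
                // FlushRegionProcedure subprocedures to the region servers, as seen in the log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }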
2024-11-27T16:22:18,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:18,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:18,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:18,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
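[Editor's illustrative note] The repeated "Over memstore limit=512.0 K" rejections above come from HRegion.checkResources blocking writes while the region's memstore is over its blocking size. That blocking size is normally the per-region memstore flush size multiplied by a block multiplier, so a 512 K limit points at a deliberately small flush size in this test. The sketch below shows the relevant settings; the values are invented for illustration, not read from this test's configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative settings behind the "Over memstore limit" rejections; values are made up.
    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Per-region memstore size that triggers a flush.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
            // Writes are blocked once the memstore exceeds flush.size * block.multiplier
            // (128 K * 4 = 512 K, which would reproduce the limit reported above).
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("blocking limit = "
                + conf.getLong("hbase.hregion.memstore.flush.size", 0)
                  * conf.getInt("hbase.hregion.memstore.block.multiplier", 0) + " bytes");
        }
    }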
2024-11-27T16:22:18,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:18,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:18,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724598764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:18,831 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:18,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-27T16:22:18,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:18,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:18,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:18,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:18,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:18,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:18,890 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/fb6bce94783b4b69a643a8087cc906d5 2024-11-27T16:22:18,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/7740bdb667df42ab9455c3297b6f0d2b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7740bdb667df42ab9455c3297b6f0d2b 2024-11-27T16:22:18,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7740bdb667df42ab9455c3297b6f0d2b, entries=150, sequenceid=329, filesize=12.0 K 2024-11-27T16:22:18,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/122852e5075045b48f4c7e2f91bc0f09 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/122852e5075045b48f4c7e2f91bc0f09 2024-11-27T16:22:18,950 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/122852e5075045b48f4c7e2f91bc0f09, entries=150, sequenceid=329, filesize=12.0 K 2024-11-27T16:22:18,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/fb6bce94783b4b69a643a8087cc906d5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/fb6bce94783b4b69a643a8087cc906d5 2024-11-27T16:22:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-27T16:22:18,968 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/fb6bce94783b4b69a643a8087cc906d5, entries=150, sequenceid=329, filesize=12.0 K 2024-11-27T16:22:18,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b7c24f821c64d1ed1608bef04711b574 in 1497ms, sequenceid=329, compaction requested=true 2024-11-27T16:22:18,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:18,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:18,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:18,972 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:18,972 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:18,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:18,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:18,973 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:18,974 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/B is initiating minor compaction (all files) 2024-11-27T16:22:18,974 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/B in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:18,974 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/c31bc6079c1942719f5173c9559b727d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/ec7c64ad8f6c4d3da9eed1374f97d442, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/122852e5075045b48f4c7e2f91bc0f09] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=36.7 K 2024-11-27T16:22:18,974 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:18,974 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/A is initiating minor compaction (all files) 2024-11-27T16:22:18,974 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/A in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:18,974 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a5f11e05584945f99ca769344b553a5c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/818ff8c7ba41408e883ccbe7f9e7d6c7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7740bdb667df42ab9455c3297b6f0d2b] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=36.7 K 2024-11-27T16:22:18,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:18,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:18,976 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting c31bc6079c1942719f5173c9559b727d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732724534790 2024-11-27T16:22:18,976 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5f11e05584945f99ca769344b553a5c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732724534790 2024-11-27T16:22:18,976 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting ec7c64ad8f6c4d3da9eed1374f97d442, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732724536058 2024-11-27T16:22:18,977 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 818ff8c7ba41408e883ccbe7f9e7d6c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732724536058 2024-11-27T16:22:18,977 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 122852e5075045b48f4c7e2f91bc0f09, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732724536837 2024-11-27T16:22:18,978 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7740bdb667df42ab9455c3297b6f0d2b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732724536837 2024-11-27T16:22:18,996 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:18,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-27T16:22:18,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:18,998 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T16:22:18,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:18,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:18,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:18,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:18,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:18,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:18,999 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#B#compaction#282 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:18,999 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/0a7715e1ff124749a50d50de23f79425 is 50, key is test_row_0/B:col10/1732724536849/Put/seqid=0 2024-11-27T16:22:19,006 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#A#compaction#283 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:19,006 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/53a4ae47f310407a8a064cc393133148 is 50, key is test_row_0/A:col10/1732724536849/Put/seqid=0 2024-11-27T16:22:19,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/369a2c928dd84ce7b69ca0a7ba210753 is 50, key is test_row_0/A:col10/1732724537635/Put/seqid=0 2024-11-27T16:22:19,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742157_1333 (size=13085) 2024-11-27T16:22:19,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742158_1334 (size=13085) 2024-11-27T16:22:19,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742159_1335 (size=12301) 2024-11-27T16:22:19,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-27T16:22:19,471 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/0a7715e1ff124749a50d50de23f79425 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/0a7715e1ff124749a50d50de23f79425 2024-11-27T16:22:19,477 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/B of b7c24f821c64d1ed1608bef04711b574 into 0a7715e1ff124749a50d50de23f79425(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:22:19,477 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:19,477 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/B, priority=13, startTime=1732724538972; duration=0sec 2024-11-27T16:22:19,477 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:19,477 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:B 2024-11-27T16:22:19,477 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:19,479 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:19,479 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/C is initiating minor compaction (all files) 2024-11-27T16:22:19,479 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/C in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:19,479 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/7149b2f90a3c483c8901bb68c6790d4d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/2ba46f7e8c9f49779557155e27ac9909, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/fb6bce94783b4b69a643a8087cc906d5] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=36.7 K 2024-11-27T16:22:19,480 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7149b2f90a3c483c8901bb68c6790d4d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732724534790 2024-11-27T16:22:19,481 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ba46f7e8c9f49779557155e27ac9909, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732724536058 2024-11-27T16:22:19,481 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting fb6bce94783b4b69a643a8087cc906d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732724536837 2024-11-27T16:22:19,489 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/53a4ae47f310407a8a064cc393133148 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/53a4ae47f310407a8a064cc393133148 2024-11-27T16:22:19,494 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/A of b7c24f821c64d1ed1608bef04711b574 into 53a4ae47f310407a8a064cc393133148(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:19,494 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:19,494 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/A, priority=13, startTime=1732724538971; duration=0sec 2024-11-27T16:22:19,495 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:19,495 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:A 2024-11-27T16:22:19,507 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#C#compaction#285 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:19,508 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/1f8eeb532c9a45e1998b9b49114fbca0 is 50, key is test_row_0/C:col10/1732724536849/Put/seqid=0 2024-11-27T16:22:19,533 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/369a2c928dd84ce7b69ca0a7ba210753 2024-11-27T16:22:19,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742160_1336 (size=13085) 2024-11-27T16:22:19,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/26c8e01f0cc446bca01a9e1d133f8ab5 is 50, key is test_row_0/B:col10/1732724537635/Put/seqid=0 2024-11-27T16:22:19,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742161_1337 (size=12301) 2024-11-27T16:22:19,617 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/26c8e01f0cc446bca01a9e1d133f8ab5 2024-11-27T16:22:19,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/723cdc69c4854c08872d60ecbfcbd023 is 50, key is test_row_0/C:col10/1732724537635/Put/seqid=0 2024-11-27T16:22:19,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742162_1338 (size=12301) 2024-11-27T16:22:19,668 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/723cdc69c4854c08872d60ecbfcbd023 2024-11-27T16:22:19,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/369a2c928dd84ce7b69ca0a7ba210753 as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/369a2c928dd84ce7b69ca0a7ba210753 2024-11-27T16:22:19,679 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/369a2c928dd84ce7b69ca0a7ba210753, entries=150, sequenceid=353, filesize=12.0 K 2024-11-27T16:22:19,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/26c8e01f0cc446bca01a9e1d133f8ab5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/26c8e01f0cc446bca01a9e1d133f8ab5 2024-11-27T16:22:19,685 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/26c8e01f0cc446bca01a9e1d133f8ab5, entries=150, sequenceid=353, filesize=12.0 K 2024-11-27T16:22:19,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/723cdc69c4854c08872d60ecbfcbd023 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/723cdc69c4854c08872d60ecbfcbd023 2024-11-27T16:22:19,697 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/723cdc69c4854c08872d60ecbfcbd023, entries=150, sequenceid=353, filesize=12.0 K 2024-11-27T16:22:19,698 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for b7c24f821c64d1ed1608bef04711b574 in 700ms, sequenceid=353, compaction requested=false 2024-11-27T16:22:19,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:19,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:19,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-27T16:22:19,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-27T16:22:19,702 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-27T16:22:19,702 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3380 sec 2024-11-27T16:22:19,704 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.3440 sec 2024-11-27T16:22:19,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:19,823 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:22:19,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:19,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:19,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:19,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:19,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:19,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:19,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/80b8b22623464778a47efa749e7cd57b is 50, key is test_row_0/A:col10/1732724539822/Put/seqid=0 2024-11-27T16:22:19,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742163_1339 (size=14741) 2024-11-27T16:22:19,876 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/80b8b22623464778a47efa749e7cd57b 2024-11-27T16:22:19,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/00ed872b45bc4149a9d812bc0b354826 is 50, key is test_row_0/B:col10/1732724539822/Put/seqid=0 2024-11-27T16:22:19,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742164_1340 
(size=12301) 2024-11-27T16:22:19,962 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/1f8eeb532c9a45e1998b9b49114fbca0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/1f8eeb532c9a45e1998b9b49114fbca0 2024-11-27T16:22:19,968 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/C of b7c24f821c64d1ed1608bef04711b574 into 1f8eeb532c9a45e1998b9b49114fbca0(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:19,968 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:19,968 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/C, priority=13, startTime=1732724538973; duration=0sec 2024-11-27T16:22:19,968 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:19,968 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:C 2024-11-27T16:22:20,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:20,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724600014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:20,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:20,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724600119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:20,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:20,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54240 deadline: 1732724600212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:20,216 DEBUG [Thread-1211 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:22:20,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:20,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54212 deadline: 1732724600218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:20,222 DEBUG [Thread-1209 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8165 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:22:20,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:20,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54196 deadline: 1732724600235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:20,239 DEBUG [Thread-1203 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8182 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:22:20,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:20,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724600325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:20,360 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/00ed872b45bc4149a9d812bc0b354826 2024-11-27T16:22:20,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/72a772dfc7d2448090e153dba7924ec3 is 50, key is test_row_0/C:col10/1732724539822/Put/seqid=0 2024-11-27T16:22:20,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742165_1341 (size=12301) 2024-11-27T16:22:20,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/72a772dfc7d2448090e153dba7924ec3 2024-11-27T16:22:20,438 
DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/80b8b22623464778a47efa749e7cd57b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/80b8b22623464778a47efa749e7cd57b 2024-11-27T16:22:20,443 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/80b8b22623464778a47efa749e7cd57b, entries=200, sequenceid=366, filesize=14.4 K 2024-11-27T16:22:20,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/00ed872b45bc4149a9d812bc0b354826 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/00ed872b45bc4149a9d812bc0b354826 2024-11-27T16:22:20,449 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/00ed872b45bc4149a9d812bc0b354826, entries=150, sequenceid=366, filesize=12.0 K 2024-11-27T16:22:20,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/72a772dfc7d2448090e153dba7924ec3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/72a772dfc7d2448090e153dba7924ec3 2024-11-27T16:22:20,456 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/72a772dfc7d2448090e153dba7924ec3, entries=150, sequenceid=366, filesize=12.0 K 2024-11-27T16:22:20,458 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b7c24f821c64d1ed1608bef04711b574 in 635ms, sequenceid=366, compaction requested=true 2024-11-27T16:22:20,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:20,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:20,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:20,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:20,458 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:20,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:20,458 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:20,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:20,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:20,461 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:20,461 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/B is initiating minor compaction (all files) 2024-11-27T16:22:20,461 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/B in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:20,461 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/0a7715e1ff124749a50d50de23f79425, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/26c8e01f0cc446bca01a9e1d133f8ab5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/00ed872b45bc4149a9d812bc0b354826] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=36.8 K 2024-11-27T16:22:20,462 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a7715e1ff124749a50d50de23f79425, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732724536837 2024-11-27T16:22:20,462 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40127 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:20,462 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/A is initiating minor compaction (all files) 2024-11-27T16:22:20,462 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/A in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
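
[Editor's note] The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" lines above come from the store-file compaction selection (ExploringCompactionPolicy). As a hedged illustration only, the sketch below reads the standard HBase settings that shape that decision; the default fallbacks shown are stock HBase defaults, and the test cluster may override them. It is not code from this test.

```java
// Illustrative only: the compaction-selection knobs behind the
// "3 eligible, 16 blocking" decision logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);      // fewest files per minor compaction
    int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);     // most files per minor compaction
    int blocking = conf.getInt("hbase.hstore.blockingStoreFiles", 16); // the "16 blocking" seen in the log
    double ratio = conf.getDouble("hbase.hstore.compaction.ratio", 1.2);
    System.out.printf("min=%d max=%d blocking=%d ratio=%.1f%n", minFiles, maxFiles, blocking, ratio);
  }
}
```
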
2024-11-27T16:22:20,462 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/53a4ae47f310407a8a064cc393133148, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/369a2c928dd84ce7b69ca0a7ba210753, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/80b8b22623464778a47efa749e7cd57b] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=39.2 K 2024-11-27T16:22:20,462 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 26c8e01f0cc446bca01a9e1d133f8ab5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732724537609 2024-11-27T16:22:20,463 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 00ed872b45bc4149a9d812bc0b354826, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732724539797 2024-11-27T16:22:20,463 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53a4ae47f310407a8a064cc393133148, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732724536837 2024-11-27T16:22:20,464 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 369a2c928dd84ce7b69ca0a7ba210753, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732724537609 2024-11-27T16:22:20,464 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80b8b22623464778a47efa749e7cd57b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732724539797 2024-11-27T16:22:20,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-27T16:22:20,468 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-27T16:22:20,470 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:20,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-27T16:22:20,473 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:20,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-27T16:22:20,474 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:20,474 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:20,486 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#B#compaction#291 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:20,487 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/a2885890f8fa4430a35c119b3f269105 is 50, key is test_row_0/B:col10/1732724539822/Put/seqid=0 2024-11-27T16:22:20,495 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#A#compaction#292 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:20,495 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/952f894e079f40aa8cb43e1aafc80f25 is 50, key is test_row_0/A:col10/1732724539822/Put/seqid=0 2024-11-27T16:22:20,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742166_1342 (size=13187) 2024-11-27T16:22:20,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-27T16:22:20,575 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/a2885890f8fa4430a35c119b3f269105 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/a2885890f8fa4430a35c119b3f269105 2024-11-27T16:22:20,583 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/B of b7c24f821c64d1ed1608bef04711b574 into a2885890f8fa4430a35c119b3f269105(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
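
[Editor's note] The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and FlushTableProcedure (pid=81, with FlushRegionProcedure subprocedure pid=82) entries above correspond to a client-side table flush request. A minimal sketch of such a call via the public Admin API follows; it is not the test tool's actual code, just the kind of call that produces these master-side entries.

```java
// Minimal sketch: client call that triggers a table flush, which the master
// runs as a FlushTableProcedure (cf. "Operation: FLUSH ... procId: 81 completed").
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Requests a flush of all regions of the table; the log above shows the
      // resulting procedure completing on the master.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```
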
2024-11-27T16:22:20,583 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:20,583 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/B, priority=13, startTime=1732724540458; duration=0sec 2024-11-27T16:22:20,583 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:20,583 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:B 2024-11-27T16:22:20,583 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:20,586 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:20,586 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/C is initiating minor compaction (all files) 2024-11-27T16:22:20,586 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/C in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:20,586 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/1f8eeb532c9a45e1998b9b49114fbca0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/723cdc69c4854c08872d60ecbfcbd023, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/72a772dfc7d2448090e153dba7924ec3] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=36.8 K 2024-11-27T16:22:20,587 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f8eeb532c9a45e1998b9b49114fbca0, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732724536837 2024-11-27T16:22:20,587 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 723cdc69c4854c08872d60ecbfcbd023, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732724537609 2024-11-27T16:22:20,588 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 72a772dfc7d2448090e153dba7924ec3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732724539797 2024-11-27T16:22:20,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 
is added to blk_1073742167_1343 (size=13187) 2024-11-27T16:22:20,594 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/952f894e079f40aa8cb43e1aafc80f25 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/952f894e079f40aa8cb43e1aafc80f25 2024-11-27T16:22:20,600 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/A of b7c24f821c64d1ed1608bef04711b574 into 952f894e079f40aa8cb43e1aafc80f25(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:20,600 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:20,600 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/A, priority=13, startTime=1732724540458; duration=0sec 2024-11-27T16:22:20,600 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:20,600 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:A 2024-11-27T16:22:20,605 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#C#compaction#293 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:20,606 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/9aa024b785044948a237cd5d82874b77 is 50, key is test_row_0/C:col10/1732724539822/Put/seqid=0 2024-11-27T16:22:20,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:20,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-27T16:22:20,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
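
[Editor's note] The RegionTooBusyException ("Over memstore limit=512.0 K") repeated throughout this section is thrown by HRegion.checkResources when a region's memstore grows past its blocking size, which is derived from the memstore flush size and the block multiplier. The 512.0 K limit here implies the test configures these values far below the stock defaults. The sketch below only illustrates how that limit is derived from standard settings; it is an assumption-labelled illustration, not the region server's code.

```java
// Rough sketch of the limit behind the repeated RegionTooBusyException:
// writes are rejected once a region's memstore exceeds flush.size * multiplier.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier; // puts beyond this get RegionTooBusyException
    System.out.println("blocking memstore limit (bytes): " + blockingLimit);
  }
}
```
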
2024-11-27T16:22:20,631 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:22:20,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:20,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:20,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:20,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:20,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:20,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:20,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/2ea8b608f07645729438f43b1a095ec7 is 50, key is test_row_0/A:col10/1732724539981/Put/seqid=0 2024-11-27T16:22:20,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:20,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:20,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742168_1344 (size=13187) 2024-11-27T16:22:20,676 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/9aa024b785044948a237cd5d82874b77 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9aa024b785044948a237cd5d82874b77 2024-11-27T16:22:20,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:20,680 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/C of b7c24f821c64d1ed1608bef04711b574 into 9aa024b785044948a237cd5d82874b77(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:20,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724600678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:20,681 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:20,681 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/C, priority=13, startTime=1732724540458; duration=0sec 2024-11-27T16:22:20,681 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:20,681 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:C 2024-11-27T16:22:20,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742169_1345 (size=12301) 2024-11-27T16:22:20,709 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/2ea8b608f07645729438f43b1a095ec7 2024-11-27T16:22:20,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/71fee095526d41359c5eb7772cb577a7 is 50, key is test_row_0/B:col10/1732724539981/Put/seqid=0 2024-11-27T16:22:20,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-27T16:22:20,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742170_1346 (size=12301) 2024-11-27T16:22:20,777 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/71fee095526d41359c5eb7772cb577a7 2024-11-27T16:22:20,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:20,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724600782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:20,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/6d00eb408a18495e8c7f30b6de5f00f7 is 50, key is test_row_0/C:col10/1732724539981/Put/seqid=0 2024-11-27T16:22:20,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742171_1347 (size=12301) 2024-11-27T16:22:20,842 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/6d00eb408a18495e8c7f30b6de5f00f7 2024-11-27T16:22:20,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/2ea8b608f07645729438f43b1a095ec7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/2ea8b608f07645729438f43b1a095ec7 2024-11-27T16:22:20,862 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/2ea8b608f07645729438f43b1a095ec7, entries=150, sequenceid=394, filesize=12.0 K 2024-11-27T16:22:20,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/71fee095526d41359c5eb7772cb577a7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/71fee095526d41359c5eb7772cb577a7 2024-11-27T16:22:20,871 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 
{event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/71fee095526d41359c5eb7772cb577a7, entries=150, sequenceid=394, filesize=12.0 K 2024-11-27T16:22:20,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/6d00eb408a18495e8c7f30b6de5f00f7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/6d00eb408a18495e8c7f30b6de5f00f7 2024-11-27T16:22:20,878 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/6d00eb408a18495e8c7f30b6de5f00f7, entries=150, sequenceid=394, filesize=12.0 K 2024-11-27T16:22:20,879 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b7c24f821c64d1ed1608bef04711b574 in 249ms, sequenceid=394, compaction requested=false 2024-11-27T16:22:20,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:20,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
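Editor's note: the flush procedures recorded above and below (pid=81/82, then pid=83/84 with "Client=jenkins//172.17.0.2 flush TestAcidGuarantees") are driven by an external flush request against the public HBase client API. As a minimal orientation sketch, this is roughly what issuing such a request looks like; the table name is taken from the log, while the class name, configuration source, and connection handling here are illustrative assumptions, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Assumption: an hbase-site.xml on the classpath points at the running cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Requests a flush of all regions of the table and waits for completion.
      // On this cluster the master log shows the request being executed as a
      // FlushTableProcedure with FlushRegionProcedure children (pid=81/82, 83/84).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}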
2024-11-27T16:22:20,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-27T16:22:20,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-27T16:22:20,882 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-27T16:22:20,883 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 407 msec 2024-11-27T16:22:20,888 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 414 msec 2024-11-27T16:22:20,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:20,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:22:20,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:20,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:20,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:20,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:20,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:20,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:21,007 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/cd0084a7d4244ed69cb9ca9407aa9c67 is 50, key is test_row_0/A:col10/1732724540637/Put/seqid=0 2024-11-27T16:22:21,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742172_1348 (size=14741) 2024-11-27T16:22:21,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-27T16:22:21,077 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-27T16:22:21,079 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:21,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-27T16:22:21,081 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=83, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:21,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-27T16:22:21,082 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:21,082 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:21,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-27T16:22:21,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:21,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724601184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:21,234 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:21,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-27T16:22:21,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
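Editor's note: the repeated RegionTooBusyException warnings ("Over memstore limit=512.0 K") are thrown back to the writer by HRegion.checkResources once the region's memstore passes its blocking limit; the HBase client normally retries these internally, and depending on retry settings the exception may reach the caller wrapped rather than directly. The sketch below is a simplified, illustrative backoff loop, not the test's writer: the row, family, and qualifier are copied from the log keys, while the class and method names, cell value, retry count, and sleep interval are arbitrary assumptions.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  // Writes one cell, backing off when the region reports it is over its
  // memstore limit, as in the "Over memstore limit=512.0 K" warnings above.
  static void putWithBackoff(Connection connection) throws Exception {
    try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          // Note: with default client settings this exception is usually retried
          // (and possibly wrapped) inside the client before surfacing here.
          // Assumption: 5 attempts with a fixed 500 ms pause; tune as needed.
          if (++attempts >= 5) {
            throw e;
          }
          Thread.sleep(500L);
        }
      }
    }
  }
}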
2024-11-27T16:22:21,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:21,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:21,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:21,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:21,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:21,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724601290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:21,292 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-27T16:22:21,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-27T16:22:21,387 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:21,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-27T16:22:21,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:21,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:21,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:21,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:21,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:21,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:21,464 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/cd0084a7d4244ed69cb9ca9407aa9c67 2024-11-27T16:22:21,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/3c480c5bc38e43c5a06c9e6b934e539b is 50, key is test_row_0/B:col10/1732724540637/Put/seqid=0 2024-11-27T16:22:21,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:21,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724601492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:21,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742173_1349 (size=12301) 2024-11-27T16:22:21,541 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:21,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-27T16:22:21,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:21,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:21,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:21,542 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:21,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:21,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:21,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-27T16:22:21,694 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:21,695 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-27T16:22:21,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:21,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:21,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:21,695 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:21,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:21,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:21,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:21,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724601799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:21,848 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:21,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-27T16:22:21,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:21,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:21,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:21,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:21,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:21,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:21,929 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/3c480c5bc38e43c5a06c9e6b934e539b 2024-11-27T16:22:21,957 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/a5ecf59ac00842d9bb5b8a69e1de1669 is 50, key is test_row_0/C:col10/1732724540637/Put/seqid=0 2024-11-27T16:22:22,001 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:22,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-27T16:22:22,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:22,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:22,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:22,004 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:22,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:22,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:22,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742174_1350 (size=12301) 2024-11-27T16:22:22,157 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:22,160 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-27T16:22:22,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:22,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:22,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:22,161 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:22,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:22,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:22,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-27T16:22:22,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:22,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724602308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:22,313 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:22,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-27T16:22:22,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:22,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. as already flushing 2024-11-27T16:22:22,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:22,314 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:22,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:22,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:22,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/a5ecf59ac00842d9bb5b8a69e1de1669 2024-11-27T16:22:22,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/cd0084a7d4244ed69cb9ca9407aa9c67 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/cd0084a7d4244ed69cb9ca9407aa9c67 2024-11-27T16:22:22,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/cd0084a7d4244ed69cb9ca9407aa9c67, entries=200, sequenceid=407, filesize=14.4 K 2024-11-27T16:22:22,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/3c480c5bc38e43c5a06c9e6b934e539b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/3c480c5bc38e43c5a06c9e6b934e539b 2024-11-27T16:22:22,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/3c480c5bc38e43c5a06c9e6b934e539b, entries=150, sequenceid=407, filesize=12.0 K 2024-11-27T16:22:22,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/a5ecf59ac00842d9bb5b8a69e1de1669 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/a5ecf59ac00842d9bb5b8a69e1de1669 2024-11-27T16:22:22,443 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/a5ecf59ac00842d9bb5b8a69e1de1669, entries=150, sequenceid=407, filesize=12.0 K 2024-11-27T16:22:22,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b7c24f821c64d1ed1608bef04711b574 in 1448ms, sequenceid=407, compaction requested=true 2024-11-27T16:22:22,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:22,445 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction 
from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:22,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:22,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:22,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:22,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:22,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:22,445 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:22,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:22,446 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:22,446 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/A is initiating minor compaction (all files) 2024-11-27T16:22:22,446 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/A in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:22,446 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/952f894e079f40aa8cb43e1aafc80f25, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/2ea8b608f07645729438f43b1a095ec7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/cd0084a7d4244ed69cb9ca9407aa9c67] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=39.3 K 2024-11-27T16:22:22,448 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:22,448 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/B is initiating minor compaction (all files) 2024-11-27T16:22:22,448 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/B in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:22,448 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/a2885890f8fa4430a35c119b3f269105, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/71fee095526d41359c5eb7772cb577a7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/3c480c5bc38e43c5a06c9e6b934e539b] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=36.9 K 2024-11-27T16:22:22,448 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 952f894e079f40aa8cb43e1aafc80f25, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732724539797 2024-11-27T16:22:22,449 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a2885890f8fa4430a35c119b3f269105, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732724539797 2024-11-27T16:22:22,449 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ea8b608f07645729438f43b1a095ec7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732724539981 2024-11-27T16:22:22,450 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 71fee095526d41359c5eb7772cb577a7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732724539981 2024-11-27T16:22:22,450 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 3c480c5bc38e43c5a06c9e6b934e539b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732724540637 2024-11-27T16:22:22,451 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd0084a7d4244ed69cb9ca9407aa9c67, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732724540637 2024-11-27T16:22:22,466 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:22,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-27T16:22:22,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:22,475 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T16:22:22,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:22,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:22,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:22,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:22,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:22,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:22,493 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#B#compaction#300 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:22,494 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/49266a4e6a94462d8a3d6f04c1d2caf8 is 50, key is test_row_0/B:col10/1732724540637/Put/seqid=0 2024-11-27T16:22:22,502 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#A#compaction#301 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:22,503 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/624b9ecd139341a6bff98258b07478de is 50, key is test_row_0/A:col10/1732724540637/Put/seqid=0 2024-11-27T16:22:22,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/9e9951d516b34f038d5146105a396f56 is 50, key is test_row_0/A:col10/1732724541127/Put/seqid=0 2024-11-27T16:22:22,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742176_1352 (size=13289) 2024-11-27T16:22:22,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742175_1351 (size=13289) 2024-11-27T16:22:22,640 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/49266a4e6a94462d8a3d6f04c1d2caf8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/49266a4e6a94462d8a3d6f04c1d2caf8 2024-11-27T16:22:22,649 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/B of b7c24f821c64d1ed1608bef04711b574 into 49266a4e6a94462d8a3d6f04c1d2caf8(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:22:22,649 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:22,649 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/B, priority=13, startTime=1732724542445; duration=0sec 2024-11-27T16:22:22,649 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:22,649 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:B 2024-11-27T16:22:22,649 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:22,651 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:22,651 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/C is initiating minor compaction (all files) 2024-11-27T16:22:22,651 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/C in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:22,651 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9aa024b785044948a237cd5d82874b77, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/6d00eb408a18495e8c7f30b6de5f00f7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/a5ecf59ac00842d9bb5b8a69e1de1669] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=36.9 K 2024-11-27T16:22:22,652 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 9aa024b785044948a237cd5d82874b77, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732724539797 2024-11-27T16:22:22,652 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d00eb408a18495e8c7f30b6de5f00f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732724539981 2024-11-27T16:22:22,653 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a5ecf59ac00842d9bb5b8a69e1de1669, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732724540637 2024-11-27T16:22:22,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 
is added to blk_1073742177_1353 (size=12301) 2024-11-27T16:22:22,680 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#C#compaction#303 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:22,680 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/9657c83e9f6147ebabdda904bd0bef16 is 50, key is test_row_0/C:col10/1732724540637/Put/seqid=0 2024-11-27T16:22:22,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742178_1354 (size=13289) 2024-11-27T16:22:22,724 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/9657c83e9f6147ebabdda904bd0bef16 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9657c83e9f6147ebabdda904bd0bef16 2024-11-27T16:22:22,733 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/C of b7c24f821c64d1ed1608bef04711b574 into 9657c83e9f6147ebabdda904bd0bef16(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:22,733 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:22,734 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/C, priority=13, startTime=1732724542445; duration=0sec 2024-11-27T16:22:22,734 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:22,734 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:C 2024-11-27T16:22:23,019 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/624b9ecd139341a6bff98258b07478de as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/624b9ecd139341a6bff98258b07478de 2024-11-27T16:22:23,030 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/A of b7c24f821c64d1ed1608bef04711b574 into 624b9ecd139341a6bff98258b07478de(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:22:23,030 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:23,030 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/A, priority=13, startTime=1732724542444; duration=0sec 2024-11-27T16:22:23,031 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:23,031 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:A 2024-11-27T16:22:23,060 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/9e9951d516b34f038d5146105a396f56 2024-11-27T16:22:23,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/faf017bba5f549dba89df1bec03251a4 is 50, key is test_row_0/B:col10/1732724541127/Put/seqid=0 2024-11-27T16:22:23,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742179_1355 (size=12301) 2024-11-27T16:22:23,134 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/faf017bba5f549dba89df1bec03251a4 2024-11-27T16:22:23,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/c5eba097dad54ba1a6af98e7905a7830 is 50, key is test_row_0/C:col10/1732724541127/Put/seqid=0 2024-11-27T16:22:23,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-27T16:22:23,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742180_1356 (size=12301) 2024-11-27T16:22:23,205 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/c5eba097dad54ba1a6af98e7905a7830 2024-11-27T16:22:23,211 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/9e9951d516b34f038d5146105a396f56 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/9e9951d516b34f038d5146105a396f56 2024-11-27T16:22:23,216 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/9e9951d516b34f038d5146105a396f56, entries=150, sequenceid=431, filesize=12.0 K 2024-11-27T16:22:23,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/faf017bba5f549dba89df1bec03251a4 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/faf017bba5f549dba89df1bec03251a4 2024-11-27T16:22:23,224 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/faf017bba5f549dba89df1bec03251a4, entries=150, sequenceid=431, filesize=12.0 K 2024-11-27T16:22:23,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/c5eba097dad54ba1a6af98e7905a7830 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/c5eba097dad54ba1a6af98e7905a7830 2024-11-27T16:22:23,234 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/c5eba097dad54ba1a6af98e7905a7830, entries=150, sequenceid=431, filesize=12.0 K 2024-11-27T16:22:23,235 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for b7c24f821c64d1ed1608bef04711b574 in 760ms, sequenceid=431, compaction requested=false 2024-11-27T16:22:23,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:23,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:23,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-27T16:22:23,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-27T16:22:23,240 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-27T16:22:23,240 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1550 sec 2024-11-27T16:22:23,241 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 2.1610 sec 2024-11-27T16:22:23,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:23,373 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:22:23,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:23,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:23,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:23,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:23,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:23,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:23,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/a91ca1f4c41a4ac897d1afe0c334be31 is 50, key is test_row_0/A:col10/1732724543360/Put/seqid=0 2024-11-27T16:22:23,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742181_1357 (size=14741) 2024-11-27T16:22:23,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:23,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724603528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:23,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:23,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724603632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:23,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:23,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724603836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:23,854 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/a91ca1f4c41a4ac897d1afe0c334be31 2024-11-27T16:22:23,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/c42697514c974c3c9041589aa162960c is 50, key is test_row_0/B:col10/1732724543360/Put/seqid=0 2024-11-27T16:22:23,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742182_1358 (size=12301) 2024-11-27T16:22:23,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:23,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54232 deadline: 1732724603951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:23,956 DEBUG [Thread-1207 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18276 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., hostname=7b191dec6496,44169,1732724452967, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:22:24,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:24,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724604144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:24,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/c42697514c974c3c9041589aa162960c 2024-11-27T16:22:24,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/abbc1f5ff4174350966ead4fba040067 is 50, key is test_row_0/C:col10/1732724543360/Put/seqid=0 2024-11-27T16:22:24,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742183_1359 (size=12301) 2024-11-27T16:22:24,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:24,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724604652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:24,792 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/abbc1f5ff4174350966ead4fba040067 2024-11-27T16:22:24,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/a91ca1f4c41a4ac897d1afe0c334be31 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a91ca1f4c41a4ac897d1afe0c334be31 2024-11-27T16:22:24,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a91ca1f4c41a4ac897d1afe0c334be31, entries=200, sequenceid=445, filesize=14.4 K 2024-11-27T16:22:24,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/c42697514c974c3c9041589aa162960c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/c42697514c974c3c9041589aa162960c 2024-11-27T16:22:24,815 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/c42697514c974c3c9041589aa162960c, entries=150, sequenceid=445, filesize=12.0 K 2024-11-27T16:22:24,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/abbc1f5ff4174350966ead4fba040067 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/abbc1f5ff4174350966ead4fba040067 2024-11-27T16:22:24,819 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/abbc1f5ff4174350966ead4fba040067, entries=150, sequenceid=445, filesize=12.0 K 2024-11-27T16:22:24,820 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b7c24f821c64d1ed1608bef04711b574 in 1447ms, sequenceid=445, compaction requested=true 2024-11-27T16:22:24,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:24,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:24,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:24,821 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:24,821 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:24,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:24,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:24,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:24,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:24,822 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40331 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:24,822 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/A is initiating minor compaction (all files) 2024-11-27T16:22:24,822 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/A in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
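The RegionTooBusyException entries above come from HRegion.checkResources() rejecting writes once the region's memstore reaches its blocking limit; the client's RpcRetryingCallerImpl retries internally, but with the aggressive settings used by this test the exception can still propagate out of HTable.put, as the first trace shows. A minimal application-level guard, assuming the standard hbase-client 2.x API; the class and helper names (BusyRegionPutExample, putWithBackoff) and the retry counts are hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          putWithBackoff(table, put, 5);
        }
      }

      // Hypothetical helper: retry a put a few times when the region reports it is too busy.
      static void putWithBackoff(Table table, Put put, int attempts)
          throws IOException, InterruptedException {
        long pauseMs = 100;
        for (int i = 1; ; i++) {
          try {
            table.put(put);             // the client already retries internally; this is an extra guard
            return;
          } catch (RegionTooBusyException e) {
            if (i >= attempts) throw e; // give up after a few attempts
            Thread.sleep(pauseMs);      // simple exponential backoff
            pauseMs *= 2;
          }
        }
      }
    }

Depending on hbase.client.retries.number and hbase.client.pause, the busy signal may instead surface only after the client's own retries are exhausted, so production code often catches IOException more broadly.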
2024-11-27T16:22:24,822 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/624b9ecd139341a6bff98258b07478de, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/9e9951d516b34f038d5146105a396f56, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a91ca1f4c41a4ac897d1afe0c334be31] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=39.4 K 2024-11-27T16:22:24,823 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:24,823 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 624b9ecd139341a6bff98258b07478de, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732724540637 2024-11-27T16:22:24,823 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/B is initiating minor compaction (all files) 2024-11-27T16:22:24,823 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/B in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
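The selection entries above show the ExploringCompactionPolicy picking all three eligible files of a store for a system-requested minor compaction. A compaction can also be requested explicitly through the Admin API; a short sketch assuming the hbase-client 2.x Admin interface (the class name RequestCompactionExample is made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Ask the region server to consider compacting store "A"; the compaction
          // policy (as in the log above) still decides which files are selected.
          admin.compact(table, Bytes.toBytes("A"));
          // A major compaction rewrites all store files of the family into one:
          // admin.majorCompact(table, Bytes.toBytes("A"));
        }
      }
    }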
2024-11-27T16:22:24,823 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/49266a4e6a94462d8a3d6f04c1d2caf8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/faf017bba5f549dba89df1bec03251a4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/c42697514c974c3c9041589aa162960c] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=37.0 K 2024-11-27T16:22:24,823 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e9951d516b34f038d5146105a396f56, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732724541127 2024-11-27T16:22:24,823 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 49266a4e6a94462d8a3d6f04c1d2caf8, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732724540637 2024-11-27T16:22:24,824 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a91ca1f4c41a4ac897d1afe0c334be31, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1732724543343 2024-11-27T16:22:24,824 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting faf017bba5f549dba89df1bec03251a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732724541127 2024-11-27T16:22:24,824 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting c42697514c974c3c9041589aa162960c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1732724543351 2024-11-27T16:22:24,843 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#A#compaction#309 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:24,844 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/0a5fabe1d4d54c7196c93acfe3ec11b0 is 50, key is test_row_0/A:col10/1732724543360/Put/seqid=0 2024-11-27T16:22:24,858 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#B#compaction#310 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:24,859 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/1e4b7f3ac5c74059afef8cb10b5dd765 is 50, key is test_row_0/B:col10/1732724543360/Put/seqid=0 2024-11-27T16:22:24,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742184_1360 (size=13391) 2024-11-27T16:22:24,930 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/0a5fabe1d4d54c7196c93acfe3ec11b0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/0a5fabe1d4d54c7196c93acfe3ec11b0 2024-11-27T16:22:24,943 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/A of b7c24f821c64d1ed1608bef04711b574 into 0a5fabe1d4d54c7196c93acfe3ec11b0(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:24,943 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:24,943 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/A, priority=13, startTime=1732724544821; duration=0sec 2024-11-27T16:22:24,943 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:24,943 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:A 2024-11-27T16:22:24,943 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:24,944 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:24,945 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/C is initiating minor compaction (all files) 2024-11-27T16:22:24,945 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/C in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
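The flushed and compacted cells carry keys such as test_row_0/A:col10, test_row_0/B:col10 and test_row_0/C:col10: the test writes the same value into every column family of a row in a single Put so that readers can detect torn rows. A simplified, hypothetical writer in the spirit of AcidGuaranteesTestTool$AtomicityWriter (not the actual test code):

    import java.util.concurrent.ThreadLocalRandom;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AtomicWriterSketch {
      static final byte[][] FAMILIES = { Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") };

      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          byte[] row = Bytes.toBytes("test_row_0");
          byte[] value = Bytes.toBytes(ThreadLocalRandom.current().nextLong());
          Put put = new Put(row);
          for (byte[] family : FAMILIES) {
            // The same value goes to every family; a reader must never observe a mix.
            put.addColumn(family, Bytes.toBytes("col10"), value);
          }
          table.put(put); // a single Put to one row is applied atomically
        }
      }
    }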
2024-11-27T16:22:24,945 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9657c83e9f6147ebabdda904bd0bef16, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/c5eba097dad54ba1a6af98e7905a7830, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/abbc1f5ff4174350966ead4fba040067] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=37.0 K 2024-11-27T16:22:24,945 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9657c83e9f6147ebabdda904bd0bef16, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732724540637 2024-11-27T16:22:24,946 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5eba097dad54ba1a6af98e7905a7830, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732724541127 2024-11-27T16:22:24,946 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting abbc1f5ff4174350966ead4fba040067, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1732724543351 2024-11-27T16:22:24,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742185_1361 (size=13391) 2024-11-27T16:22:24,966 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#C#compaction#311 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:24,967 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/cf4c76a68dae4ecca8e957a6bc9242c3 is 50, key is test_row_0/C:col10/1732724543360/Put/seqid=0 2024-11-27T16:22:25,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742186_1362 (size=13391) 2024-11-27T16:22:25,028 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/cf4c76a68dae4ecca8e957a6bc9242c3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/cf4c76a68dae4ecca8e957a6bc9242c3 2024-11-27T16:22:25,034 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/C of b7c24f821c64d1ed1608bef04711b574 into cf4c76a68dae4ecca8e957a6bc9242c3(size=13.1 K), total size for store is 13.1 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:25,034 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:25,034 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/C, priority=13, startTime=1732724544821; duration=0sec 2024-11-27T16:22:25,034 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:25,034 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:C 2024-11-27T16:22:25,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-27T16:22:25,195 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-27T16:22:25,197 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:25,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-27T16:22:25,199 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:25,200 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:25,200 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:25,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-27T16:22:25,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-27T16:22:25,354 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:25,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-27T16:22:25,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
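The pid=85/pid=86 entries above are the master-side FlushTableProcedure fanning a FlushRegionProcedure out to the region server after a client flush request. From the client side that request is a single Admin call; a minimal sketch assuming the 2.x Admin API (the class name is illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Triggers a flush of the table's memstores; on the master this shows up as a
          // FlushTableProcedure with one FlushRegionProcedure per region, as in the log above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }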
2024-11-27T16:22:25,355 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:22:25,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:25,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:25,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:25,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:25,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:25,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:25,366 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/1e4b7f3ac5c74059afef8cb10b5dd765 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/1e4b7f3ac5c74059afef8cb10b5dd765 2024-11-27T16:22:25,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/8ee408c7a31b49bb9041151ae6eb97cb is 50, key is test_row_0/A:col10/1732724543478/Put/seqid=0 2024-11-27T16:22:25,383 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/B of b7c24f821c64d1ed1608bef04711b574 into 1e4b7f3ac5c74059afef8cb10b5dd765(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
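The "FLUSHING TO DISK ... store=A/B/C" and CompactionPipeline entries indicate the stores use a CompactingMemStore, i.e. in-memory compaction is enabled for the column families. If I recall the 2.x descriptor API correctly, that is configured per family; a sketch under that assumption (the table name and the BASIC policy choice are illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("InMemoryCompactionDemo"));
          for (String family : new String[] { "A", "B", "C" }) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    // BASIC keeps a pipeline of in-memory segments (a CompactingMemStore)
                    // instead of a single active segment.
                    .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                    .build());
          }
          admin.createTable(table.build());
        }
      }
    }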
2024-11-27T16:22:25,383 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:25,383 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/B, priority=13, startTime=1732724544821; duration=0sec 2024-11-27T16:22:25,383 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:25,383 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:B 2024-11-27T16:22:25,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742187_1363 (size=12301) 2024-11-27T16:22:25,422 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/8ee408c7a31b49bb9041151ae6eb97cb 2024-11-27T16:22:25,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/793004a376e84ff8ae0a99db011ee0da is 50, key is test_row_0/B:col10/1732724543478/Put/seqid=0 2024-11-27T16:22:25,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742188_1364 (size=12301) 2024-11-27T16:22:25,485 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/793004a376e84ff8ae0a99db011ee0da 2024-11-27T16:22:25,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/f5311e2e899f4639bf18a4afbfc4a595 is 50, key is test_row_0/C:col10/1732724543478/Put/seqid=0 2024-11-27T16:22:25,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-27T16:22:25,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742189_1365 (size=12301) 2024-11-27T16:22:25,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
as already flushing 2024-11-27T16:22:25,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:25,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:25,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724605713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:25,745 DEBUG [Thread-1220 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x532e5d9f to 127.0.0.1:51088 2024-11-27T16:22:25,745 DEBUG [Thread-1218 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61f33e78 to 127.0.0.1:51088 2024-11-27T16:22:25,745 DEBUG [Thread-1218 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:25,745 DEBUG [Thread-1220 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:25,746 DEBUG [Thread-1222 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31dd347a to 127.0.0.1:51088 2024-11-27T16:22:25,746 DEBUG [Thread-1222 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:25,748 DEBUG [Thread-1216 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3505ffc0 to 127.0.0.1:51088 2024-11-27T16:22:25,748 DEBUG [Thread-1216 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:25,749 DEBUG [Thread-1214 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7dfc4f36 to 127.0.0.1:51088 2024-11-27T16:22:25,749 DEBUG [Thread-1214 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:25,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-27T16:22:25,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due 
to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:25,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54228 deadline: 1732724605819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:25,930 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/f5311e2e899f4639bf18a4afbfc4a595 2024-11-27T16:22:25,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/8ee408c7a31b49bb9041151ae6eb97cb as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8ee408c7a31b49bb9041151ae6eb97cb 2024-11-27T16:22:25,940 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8ee408c7a31b49bb9041151ae6eb97cb, entries=150, sequenceid=472, filesize=12.0 K 2024-11-27T16:22:25,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/793004a376e84ff8ae0a99db011ee0da as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/793004a376e84ff8ae0a99db011ee0da 2024-11-27T16:22:25,950 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/793004a376e84ff8ae0a99db011ee0da, entries=150, sequenceid=472, filesize=12.0 K 2024-11-27T16:22:25,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/f5311e2e899f4639bf18a4afbfc4a595 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f5311e2e899f4639bf18a4afbfc4a595 2024-11-27T16:22:25,961 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f5311e2e899f4639bf18a4afbfc4a595, entries=150, sequenceid=472, filesize=12.0 K 2024-11-27T16:22:25,962 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b7c24f821c64d1ed1608bef04711b574 in 607ms, sequenceid=472, compaction requested=false 2024-11-27T16:22:25,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:25,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
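The flush above drains ~147.60 KB and clears the condition behind the RegionTooBusyException, which the server raises once a region's memstore reaches its blocking size (reported as "Over memstore limit=512.0 K"). That limit is governed by the flush size and block multiplier; a configuration sketch, assuming the standard property names and that the blocking size is roughly their product (the test evidently uses a much smaller flush size than the defaults shown here):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfigExample {
      public static void main(String[] args) {
        // These values normally live in hbase-site.xml on the region servers;
        // setting them on a Configuration here only illustrates the knobs involved.
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore size that triggers a flush (default 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore grows to
        // roughly flush.size * block.multiplier (the "Over memstore limit" in the log).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
      }
    }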
2024-11-27T16:22:25,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-27T16:22:25,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-27T16:22:25,965 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-27T16:22:25,965 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 764 msec 2024-11-27T16:22:25,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 769 msec 2024-11-27T16:22:26,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:26,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:22:26,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:26,026 DEBUG [Thread-1205 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x05b4256e to 127.0.0.1:51088 2024-11-27T16:22:26,026 DEBUG [Thread-1205 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:26,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:26,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:26,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:26,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:26,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:26,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/500e873db459452497e0ee0fc43585d8 is 50, key is test_row_0/A:col10/1732724545704/Put/seqid=0 2024-11-27T16:22:26,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742190_1366 (size=9857) 2024-11-27T16:22:26,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=485 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/500e873db459452497e0ee0fc43585d8 2024-11-27T16:22:26,069 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/b98814ea008648fea7d05145adf66e84 
is 50, key is test_row_0/B:col10/1732724545704/Put/seqid=0 2024-11-27T16:22:26,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742191_1367 (size=9857) 2024-11-27T16:22:26,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=485 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/b98814ea008648fea7d05145adf66e84 2024-11-27T16:22:26,111 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/d6d1bdb403e54311a167d0c9ece3070d is 50, key is test_row_0/C:col10/1732724545704/Put/seqid=0 2024-11-27T16:22:26,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742192_1368 (size=9857) 2024-11-27T16:22:26,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=485 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/d6d1bdb403e54311a167d0c9ece3070d 2024-11-27T16:22:26,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/500e873db459452497e0ee0fc43585d8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/500e873db459452497e0ee0fc43585d8 2024-11-27T16:22:26,170 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/500e873db459452497e0ee0fc43585d8, entries=100, sequenceid=485, filesize=9.6 K 2024-11-27T16:22:26,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/b98814ea008648fea7d05145adf66e84 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/b98814ea008648fea7d05145adf66e84 2024-11-27T16:22:26,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/b98814ea008648fea7d05145adf66e84, entries=100, sequenceid=485, filesize=9.6 K 2024-11-27T16:22:26,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/d6d1bdb403e54311a167d0c9ece3070d as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d6d1bdb403e54311a167d0c9ece3070d 2024-11-27T16:22:26,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d6d1bdb403e54311a167d0c9ece3070d, entries=100, sequenceid=485, filesize=9.6 K 2024-11-27T16:22:26,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=0 B/0 for b7c24f821c64d1ed1608bef04711b574 in 158ms, sequenceid=485, compaction requested=true 2024-11-27T16:22:26,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:26,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:26,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:26,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:26,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:26,184 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:26,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b7c24f821c64d1ed1608bef04711b574:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:26,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:22:26,184 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:26,186 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:26,186 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/A is initiating minor compaction (all files) 2024-11-27T16:22:26,186 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/A in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
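Between these flush and compaction cycles the test's readers keep verifying that a row is never seen half-written. A simplified, hypothetical reader check in the same spirit (not the actual getter/scanner used by the test):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AtomicReaderSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Result result = table.get(new Get(Bytes.toBytes("test_row_0")));
          byte[] a = result.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"));
          byte[] b = result.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
          byte[] c = result.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"));
          // Flushes and compactions must never let a reader observe a torn row.
          if (!Bytes.equals(a, b) || !Bytes.equals(b, c)) {
            throw new AssertionError("row visible in an inconsistent state");
          }
        }
      }
    }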
2024-11-27T16:22:26,186 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/0a5fabe1d4d54c7196c93acfe3ec11b0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8ee408c7a31b49bb9041151ae6eb97cb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/500e873db459452497e0ee0fc43585d8] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=34.7 K 2024-11-27T16:22:26,186 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:26,186 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/B is initiating minor compaction (all files) 2024-11-27T16:22:26,186 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/B in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:26,187 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/1e4b7f3ac5c74059afef8cb10b5dd765, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/793004a376e84ff8ae0a99db011ee0da, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/b98814ea008648fea7d05145adf66e84] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=34.7 K 2024-11-27T16:22:26,187 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a5fabe1d4d54c7196c93acfe3ec11b0, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1732724543351 2024-11-27T16:22:26,187 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e4b7f3ac5c74059afef8cb10b5dd765, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1732724543351 2024-11-27T16:22:26,187 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ee408c7a31b49bb9041151ae6eb97cb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732724543478 2024-11-27T16:22:26,187 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 793004a376e84ff8ae0a99db011ee0da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732724543478 2024-11-27T16:22:26,188 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 500e873db459452497e0ee0fc43585d8, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=485, earliestPutTs=1732724545704 2024-11-27T16:22:26,188 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b98814ea008648fea7d05145adf66e84, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=485, earliestPutTs=1732724545704 2024-11-27T16:22:26,208 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#A#compaction#318 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:26,209 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/e22fedd60ba14d30873a30a56c0ef671 is 50, key is test_row_0/A:col10/1732724545704/Put/seqid=0 2024-11-27T16:22:26,222 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#B#compaction#319 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:26,223 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/4e18dbeac2e44859ab8851a2bb62d9e5 is 50, key is test_row_0/B:col10/1732724545704/Put/seqid=0 2024-11-27T16:22:26,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742193_1369 (size=13493) 2024-11-27T16:22:26,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742194_1370 (size=13493) 2024-11-27T16:22:26,255 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/e22fedd60ba14d30873a30a56c0ef671 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/e22fedd60ba14d30873a30a56c0ef671 2024-11-27T16:22:26,261 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/A of b7c24f821c64d1ed1608bef04711b574 into e22fedd60ba14d30873a30a56c0ef671(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
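Each completed compaction above collapses three store files into one (~13.2 K) and removes the "under compaction" mark for the store. Whether a table still has compactions pending can be polled from a client; a sketch assuming Admin.getCompactionState is available, as in recent 2.x releases:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionStateExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          CompactionState state = admin.getCompactionState(table);
          // Reports NONE once the short/long compaction queues seen above have drained.
          System.out.println("Compaction state of " + table + ": " + state);
        }
      }
    }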
2024-11-27T16:22:26,261 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:26,261 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/A, priority=13, startTime=1732724546184; duration=0sec 2024-11-27T16:22:26,261 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:26,262 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:A 2024-11-27T16:22:26,262 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:26,266 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/4e18dbeac2e44859ab8851a2bb62d9e5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/4e18dbeac2e44859ab8851a2bb62d9e5 2024-11-27T16:22:26,267 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:26,267 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): b7c24f821c64d1ed1608bef04711b574/C is initiating minor compaction (all files) 2024-11-27T16:22:26,267 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b7c24f821c64d1ed1608bef04711b574/C in TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
2024-11-27T16:22:26,267 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/cf4c76a68dae4ecca8e957a6bc9242c3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f5311e2e899f4639bf18a4afbfc4a595, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d6d1bdb403e54311a167d0c9ece3070d] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp, totalSize=34.7 K 2024-11-27T16:22:26,268 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf4c76a68dae4ecca8e957a6bc9242c3, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1732724543351 2024-11-27T16:22:26,268 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5311e2e899f4639bf18a4afbfc4a595, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732724543478 2024-11-27T16:22:26,269 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6d1bdb403e54311a167d0c9ece3070d, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=485, earliestPutTs=1732724545704 2024-11-27T16:22:26,273 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/B of b7c24f821c64d1ed1608bef04711b574 into 4e18dbeac2e44859ab8851a2bb62d9e5(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:26,273 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:26,273 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/B, priority=13, startTime=1732724546184; duration=0sec 2024-11-27T16:22:26,273 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:26,273 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:B 2024-11-27T16:22:26,294 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b7c24f821c64d1ed1608bef04711b574#C#compaction#320 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:26,294 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/b2438964b65e453a8f6f905b08478b95 is 50, key is test_row_0/C:col10/1732724545704/Put/seqid=0 2024-11-27T16:22:26,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-27T16:22:26,305 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-27T16:22:26,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742195_1371 (size=13493) 2024-11-27T16:22:26,330 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/b2438964b65e453a8f6f905b08478b95 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/b2438964b65e453a8f6f905b08478b95 2024-11-27T16:22:26,336 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b7c24f821c64d1ed1608bef04711b574/C of b7c24f821c64d1ed1608bef04711b574 into b2438964b65e453a8f6f905b08478b95(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
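The three compactions above (stores A, B and C of region b7c24f821c64d1ed1608bef04711b574) are scheduled internally by the region server's CompactSplit threads. For reference, a compaction of the same table can also be requested explicitly through the public Admin API. This is a minimal illustrative sketch, not part of AcidGuaranteesTestTool; the class name is made up, and it assumes an HBase 2.x client with a reachable cluster configuration (hbase-site.xml) on the classpath.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
    public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml (ZooKeeper quorum etc.) is on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Ask the region servers to run a minor compaction on every store of
            // every region of the table; the call returns immediately and the
            // actual work is queued, much like the entries in the log above.
            admin.compact(table);
            // admin.majorCompact(table) would rewrite all store files instead.
        }
    }
}
```

The call is asynchronous; if needed, Admin#getCompactionState(table) can be polled afterwards to see whether compaction work is still outstanding for the table.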
2024-11-27T16:22:26,336 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:26,336 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574., storeName=b7c24f821c64d1ed1608bef04711b574/C, priority=13, startTime=1732724546184; duration=0sec 2024-11-27T16:22:26,336 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:26,336 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b7c24f821c64d1ed1608bef04711b574:C 2024-11-27T16:22:30,256 DEBUG [Thread-1209 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62de434f to 127.0.0.1:51088 2024-11-27T16:22:30,256 DEBUG [Thread-1209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:30,291 DEBUG [Thread-1211 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x07575b91 to 127.0.0.1:51088 2024-11-27T16:22:30,292 DEBUG [Thread-1211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:30,332 DEBUG [Thread-1203 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63a751b9 to 127.0.0.1:51088 2024-11-27T16:22:30,332 DEBUG [Thread-1203 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:33,959 DEBUG [Thread-1207 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18ed3e4c to 127.0.0.1:51088 2024-11-27T16:22:33,959 DEBUG [Thread-1207 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:33,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-27T16:22:33,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 80
2024-11-27T16:22:33,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 190
2024-11-27T16:22:33,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 1
2024-11-27T16:22:33,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61
2024-11-27T16:22:33,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51
2024-11-27T16:22:33,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-27T16:22:33,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4308
2024-11-27T16:22:33,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4145
2024-11-27T16:22:33,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4127
2024-11-27T16:22:33,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4283
2024-11-27T16:22:33,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4144
2024-11-27T16:22:33,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-27T16:22:33,960 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-27T16:22:33,960 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x473dfbd2 to 127.0.0.1:51088
2024-11-27T16:22:33,960 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T16:22:33,961 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-27T16:22:33,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-27T16:22:33,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-27T16:22:33,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87
2024-11-27T16:22:33,965 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724553965"}]},"ts":"1732724553965"}
2024-11-27T16:22:33,966 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-27T16:22:33,968 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-27T16:22:33,969 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-27T16:22:33,970 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7c24f821c64d1ed1608bef04711b574, UNASSIGN}]
2024-11-27T16:22:33,970 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7c24f821c64d1ed1608bef04711b574, UNASSIGN
2024-11-27T16:22:33,971 INFO [PEWorker-1 {}]
assignment.RegionStateStore(202): pid=89 updating hbase:meta row=b7c24f821c64d1ed1608bef04711b574, regionState=CLOSING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:22:33,972 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T16:22:33,972 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; CloseRegionProcedure b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:22:34,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-27T16:22:34,123 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:34,124 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(124): Close b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:34,124 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T16:22:34,124 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1681): Closing b7c24f821c64d1ed1608bef04711b574, disabling compactions & flushes 2024-11-27T16:22:34,124 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:34,124 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:34,124 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. after waiting 0 ms 2024-11-27T16:22:34,124 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 
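The disable request above ("Started disable of TestAcidGuarantees", followed by DisableTableProcedure pid=87 and the region close that starts here) corresponds to a single client-side Admin call. A minimal sketch of that call follows; the class name is made up and the connection setup is assumed, but disableTable/deleteTable are the standard HBase 2.x Admin methods.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTestTable {
    public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml is on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            if (admin.isTableEnabled(table)) {
                // Triggers the master-side DisableTableProcedure seen in the log;
                // the call blocks until the table reaches the DISABLED state.
                admin.disableTable(table);
            }
            // Once disabled, the table could be dropped with admin.deleteTable(table).
        }
    }
}
```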
2024-11-27T16:22:34,124 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(2837): Flushing b7c24f821c64d1ed1608bef04711b574 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-27T16:22:34,124 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=A 2024-11-27T16:22:34,125 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:34,125 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=B 2024-11-27T16:22:34,125 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:34,125 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b7c24f821c64d1ed1608bef04711b574, store=C 2024-11-27T16:22:34,125 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:34,129 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/da910625906c4fec89e8e3f421346364 is 50, key is test_row_1/A:col10/1732724553958/Put/seqid=0 2024-11-27T16:22:34,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742196_1372 (size=9857) 2024-11-27T16:22:34,159 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=495 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/da910625906c4fec89e8e3f421346364 2024-11-27T16:22:34,170 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/12ad51ba49c5468eb5ef31205456550c is 50, key is test_row_1/B:col10/1732724553958/Put/seqid=0 2024-11-27T16:22:34,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742197_1373 (size=9857) 2024-11-27T16:22:34,190 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=495 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/12ad51ba49c5468eb5ef31205456550c 
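The cells being flushed here (e.g. test_row_1/A:col10, with matching cells in B and C) reflect the row/column layout the test writes: one row key carrying the same qualifier in each of the three column families. Below is a hedged sketch of a single multi-family write with that shape; the class name and value are made up, and the layout is only inferred from the keys in the log entries above.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteTestRow {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // One Put that touches all three families of the same row; HBase applies
            // a single-row mutation atomically, which is what the test relies on.
            Put put = new Put(Bytes.toBytes("test_row_1"));
            byte[] value = Bytes.toBytes("some value");
            for (String family : new String[] {"A", "B", "C"}) {
                put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
            }
            table.put(put);
        }
    }
}
```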
2024-11-27T16:22:34,197 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/3e4bf9c81d8e494f9eae1bfb9bb688e8 is 50, key is test_row_1/C:col10/1732724553958/Put/seqid=0 2024-11-27T16:22:34,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742198_1374 (size=9857) 2024-11-27T16:22:34,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-27T16:22:34,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-27T16:22:34,602 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=495 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/3e4bf9c81d8e494f9eae1bfb9bb688e8 2024-11-27T16:22:34,607 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/A/da910625906c4fec89e8e3f421346364 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/da910625906c4fec89e8e3f421346364 2024-11-27T16:22:34,612 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/da910625906c4fec89e8e3f421346364, entries=100, sequenceid=495, filesize=9.6 K 2024-11-27T16:22:34,613 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/B/12ad51ba49c5468eb5ef31205456550c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/12ad51ba49c5468eb5ef31205456550c 2024-11-27T16:22:34,618 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/12ad51ba49c5468eb5ef31205456550c, entries=100, sequenceid=495, filesize=9.6 K 2024-11-27T16:22:34,619 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/.tmp/C/3e4bf9c81d8e494f9eae1bfb9bb688e8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/3e4bf9c81d8e494f9eae1bfb9bb688e8 2024-11-27T16:22:34,623 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/3e4bf9c81d8e494f9eae1bfb9bb688e8, entries=100, sequenceid=495, filesize=9.6 K 2024-11-27T16:22:34,624 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for b7c24f821c64d1ed1608bef04711b574 in 499ms, sequenceid=495, compaction requested=false 2024-11-27T16:22:34,624 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4e78b58eeac441dc825ff749aec29387, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8cd593c8ebd54ffd91eea37108837286, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/b54f9db04aa94fb09f6ef783367fc4d5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4fb7ded2bd00458daca6d35ef8c085bb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8de5f716b98d42a785db0313ffa02aa9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/0f021abf0a7a44fb92a0989baec0189c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4369c3d48889435783f3fdc5cfbbbe02, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7150e27bc9714340a87474ebda76a204, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/881f45e681c942f9b55872138dbcf7b3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/2404d095608244e6975e9446c10e76f9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/85fd439bed2543da809075c3b7436ede, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/f05337dfa31c4b749a621362c83259cd, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7c2aeb01bbaf4b00b63ae6f14903cc0f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/9662442ba947476f9ccd1d3e7d4d8d6e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/92e5e329c1cb4ea4abe6350fb58a1697, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a956ea6c672740d281884e6d08b82460, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/026f83a2ada548c39ad61db95f5cde2c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/6395bc582d4d4f5187e4b66de81df8d3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/552f8d64d15845dd933caf9b9e9b68e1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4f176e56bdce40cabcc32557bddd8d5b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a5f11e05584945f99ca769344b553a5c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/818ff8c7ba41408e883ccbe7f9e7d6c7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/53a4ae47f310407a8a064cc393133148, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7740bdb667df42ab9455c3297b6f0d2b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/369a2c928dd84ce7b69ca0a7ba210753, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/80b8b22623464778a47efa749e7cd57b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/952f894e079f40aa8cb43e1aafc80f25, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/2ea8b608f07645729438f43b1a095ec7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/cd0084a7d4244ed69cb9ca9407aa9c67, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/624b9ecd139341a6bff98258b07478de, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/9e9951d516b34f038d5146105a396f56, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a91ca1f4c41a4ac897d1afe0c334be31, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/0a5fabe1d4d54c7196c93acfe3ec11b0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8ee408c7a31b49bb9041151ae6eb97cb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/500e873db459452497e0ee0fc43585d8] to archive 2024-11-27T16:22:34,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T16:22:34,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4e78b58eeac441dc825ff749aec29387 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4e78b58eeac441dc825ff749aec29387 2024-11-27T16:22:34,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8cd593c8ebd54ffd91eea37108837286 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8cd593c8ebd54ffd91eea37108837286 2024-11-27T16:22:34,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/b54f9db04aa94fb09f6ef783367fc4d5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/b54f9db04aa94fb09f6ef783367fc4d5 2024-11-27T16:22:34,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4fb7ded2bd00458daca6d35ef8c085bb to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4fb7ded2bd00458daca6d35ef8c085bb 2024-11-27T16:22:34,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8de5f716b98d42a785db0313ffa02aa9 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8de5f716b98d42a785db0313ffa02aa9 2024-11-27T16:22:34,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/0f021abf0a7a44fb92a0989baec0189c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/0f021abf0a7a44fb92a0989baec0189c 2024-11-27T16:22:34,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4369c3d48889435783f3fdc5cfbbbe02 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4369c3d48889435783f3fdc5cfbbbe02 2024-11-27T16:22:34,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7150e27bc9714340a87474ebda76a204 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7150e27bc9714340a87474ebda76a204 2024-11-27T16:22:34,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/881f45e681c942f9b55872138dbcf7b3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/881f45e681c942f9b55872138dbcf7b3 2024-11-27T16:22:34,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/2404d095608244e6975e9446c10e76f9 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/2404d095608244e6975e9446c10e76f9 2024-11-27T16:22:34,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/85fd439bed2543da809075c3b7436ede to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/85fd439bed2543da809075c3b7436ede 2024-11-27T16:22:34,642 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/f05337dfa31c4b749a621362c83259cd to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/f05337dfa31c4b749a621362c83259cd 2024-11-27T16:22:34,643 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7c2aeb01bbaf4b00b63ae6f14903cc0f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7c2aeb01bbaf4b00b63ae6f14903cc0f 2024-11-27T16:22:34,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/9662442ba947476f9ccd1d3e7d4d8d6e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/9662442ba947476f9ccd1d3e7d4d8d6e 2024-11-27T16:22:34,645 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/92e5e329c1cb4ea4abe6350fb58a1697 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/92e5e329c1cb4ea4abe6350fb58a1697 2024-11-27T16:22:34,646 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a956ea6c672740d281884e6d08b82460 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a956ea6c672740d281884e6d08b82460 2024-11-27T16:22:34,647 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/026f83a2ada548c39ad61db95f5cde2c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/026f83a2ada548c39ad61db95f5cde2c 2024-11-27T16:22:34,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/6395bc582d4d4f5187e4b66de81df8d3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/6395bc582d4d4f5187e4b66de81df8d3 2024-11-27T16:22:34,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/552f8d64d15845dd933caf9b9e9b68e1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/552f8d64d15845dd933caf9b9e9b68e1 2024-11-27T16:22:34,650 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4f176e56bdce40cabcc32557bddd8d5b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/4f176e56bdce40cabcc32557bddd8d5b 2024-11-27T16:22:34,652 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a5f11e05584945f99ca769344b553a5c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a5f11e05584945f99ca769344b553a5c 2024-11-27T16:22:34,653 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/818ff8c7ba41408e883ccbe7f9e7d6c7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/818ff8c7ba41408e883ccbe7f9e7d6c7 2024-11-27T16:22:34,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/53a4ae47f310407a8a064cc393133148 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/53a4ae47f310407a8a064cc393133148 2024-11-27T16:22:34,655 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7740bdb667df42ab9455c3297b6f0d2b to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/7740bdb667df42ab9455c3297b6f0d2b 2024-11-27T16:22:34,656 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/369a2c928dd84ce7b69ca0a7ba210753 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/369a2c928dd84ce7b69ca0a7ba210753 2024-11-27T16:22:34,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/80b8b22623464778a47efa749e7cd57b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/80b8b22623464778a47efa749e7cd57b 2024-11-27T16:22:34,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/952f894e079f40aa8cb43e1aafc80f25 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/952f894e079f40aa8cb43e1aafc80f25 2024-11-27T16:22:34,658 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/2ea8b608f07645729438f43b1a095ec7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/2ea8b608f07645729438f43b1a095ec7 2024-11-27T16:22:34,659 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/cd0084a7d4244ed69cb9ca9407aa9c67 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/cd0084a7d4244ed69cb9ca9407aa9c67 2024-11-27T16:22:34,660 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/624b9ecd139341a6bff98258b07478de to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/624b9ecd139341a6bff98258b07478de 2024-11-27T16:22:34,661 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/9e9951d516b34f038d5146105a396f56 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/9e9951d516b34f038d5146105a396f56 2024-11-27T16:22:34,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a91ca1f4c41a4ac897d1afe0c334be31 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/a91ca1f4c41a4ac897d1afe0c334be31 2024-11-27T16:22:34,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/0a5fabe1d4d54c7196c93acfe3ec11b0 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/0a5fabe1d4d54c7196c93acfe3ec11b0 2024-11-27T16:22:34,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8ee408c7a31b49bb9041151ae6eb97cb to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/8ee408c7a31b49bb9041151ae6eb97cb 2024-11-27T16:22:34,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/500e873db459452497e0ee0fc43585d8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/500e873db459452497e0ee0fc43585d8 2024-11-27T16:22:34,666 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/26ec322b7e9b401cbdb31aa337748eb2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/52b1bd2d6ff64bce95bd99a81012fbe3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8b4e9d3c36ab4652bde3c0550dff57f3, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/2913bfac8992482b90a9c90794354571, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/4189e9ad5d4544c1a7bfc5427f0bddb1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/a070bef300bd459aa9b05a97b0138220, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8c1c44b782304801bec31dc57b129761, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/fc4dec5df0fd4169bb5d3c3b98a566e5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8f6d613ae31f48baacf7087f205a370a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/fd391f769de44ae8a43fa5d46ac5795b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/df789d29b260439e8485b4ea2ec0f30d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/7c0c6d99391e48a6ad4900213b74bf0c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/2fbffab0024645f5a8f3a87b4a9fd36d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/6325f3e0cbba4917a3990e2bc158db33, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/80e761e651f943cfa3e7f5c35e76dc65, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/14118868d9e74bd7979191fbbcf5e13d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/b0429a334b4a44e4bd70d9c3444c4588, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/bad35449d9ba4f3abf1114348c433565, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/46297d77e113467eadbb80541de96968, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/c31bc6079c1942719f5173c9559b727d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/25595b2fdf7c4e89940fefd5c454ab68, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/ec7c64ad8f6c4d3da9eed1374f97d442, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/0a7715e1ff124749a50d50de23f79425, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/122852e5075045b48f4c7e2f91bc0f09, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/26c8e01f0cc446bca01a9e1d133f8ab5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/a2885890f8fa4430a35c119b3f269105, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/00ed872b45bc4149a9d812bc0b354826, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/71fee095526d41359c5eb7772cb577a7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/49266a4e6a94462d8a3d6f04c1d2caf8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/3c480c5bc38e43c5a06c9e6b934e539b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/faf017bba5f549dba89df1bec03251a4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/1e4b7f3ac5c74059afef8cb10b5dd765, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/c42697514c974c3c9041589aa162960c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/793004a376e84ff8ae0a99db011ee0da, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/b98814ea008648fea7d05145adf66e84] to archive 2024-11-27T16:22:34,667 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
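Archiving moves the compacted store files out of the region's data directory and into the parallel archive/ tree, as the surrounding entries show file by file. If needed, the archived files can be inspected directly with the Hadoop FileSystem API; this sketch is illustrative only, the class name is made up, and the path is copied from this particular test run (the base directory and region hash differ per run).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
    public static void main(String[] args) throws Exception {
        // Assumes the HDFS client configuration is on the classpath.
        Configuration conf = new Configuration();
        // Path shape taken from this run's log; adjust root dir and region hash as needed.
        Path archivedStore = new Path(
            "hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59"
            + "/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B");
        try (FileSystem fs = archivedStore.getFileSystem(conf)) {
            // Print each archived HFile with its size in bytes.
            for (FileStatus status : fs.listStatus(archivedStore)) {
                System.out.println(status.getPath().getName() + " " + status.getLen() + " bytes");
            }
        }
    }
}
```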
2024-11-27T16:22:34,668 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/26ec322b7e9b401cbdb31aa337748eb2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/26ec322b7e9b401cbdb31aa337748eb2 2024-11-27T16:22:34,669 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/52b1bd2d6ff64bce95bd99a81012fbe3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/52b1bd2d6ff64bce95bd99a81012fbe3 2024-11-27T16:22:34,670 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8b4e9d3c36ab4652bde3c0550dff57f3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8b4e9d3c36ab4652bde3c0550dff57f3 2024-11-27T16:22:34,670 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/2913bfac8992482b90a9c90794354571 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/2913bfac8992482b90a9c90794354571 2024-11-27T16:22:34,671 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/4189e9ad5d4544c1a7bfc5427f0bddb1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/4189e9ad5d4544c1a7bfc5427f0bddb1 2024-11-27T16:22:34,672 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/a070bef300bd459aa9b05a97b0138220 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/a070bef300bd459aa9b05a97b0138220 2024-11-27T16:22:34,673 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8c1c44b782304801bec31dc57b129761 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8c1c44b782304801bec31dc57b129761 2024-11-27T16:22:34,674 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/fc4dec5df0fd4169bb5d3c3b98a566e5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/fc4dec5df0fd4169bb5d3c3b98a566e5 2024-11-27T16:22:34,675 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8f6d613ae31f48baacf7087f205a370a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/8f6d613ae31f48baacf7087f205a370a 2024-11-27T16:22:34,675 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/fd391f769de44ae8a43fa5d46ac5795b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/fd391f769de44ae8a43fa5d46ac5795b 2024-11-27T16:22:34,676 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/df789d29b260439e8485b4ea2ec0f30d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/df789d29b260439e8485b4ea2ec0f30d 2024-11-27T16:22:34,677 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/7c0c6d99391e48a6ad4900213b74bf0c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/7c0c6d99391e48a6ad4900213b74bf0c 2024-11-27T16:22:34,678 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/2fbffab0024645f5a8f3a87b4a9fd36d to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/2fbffab0024645f5a8f3a87b4a9fd36d 2024-11-27T16:22:34,679 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/6325f3e0cbba4917a3990e2bc158db33 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/6325f3e0cbba4917a3990e2bc158db33 2024-11-27T16:22:34,680 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/80e761e651f943cfa3e7f5c35e76dc65 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/80e761e651f943cfa3e7f5c35e76dc65 2024-11-27T16:22:34,681 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/14118868d9e74bd7979191fbbcf5e13d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/14118868d9e74bd7979191fbbcf5e13d 2024-11-27T16:22:34,682 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/b0429a334b4a44e4bd70d9c3444c4588 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/b0429a334b4a44e4bd70d9c3444c4588 2024-11-27T16:22:34,683 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/bad35449d9ba4f3abf1114348c433565 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/bad35449d9ba4f3abf1114348c433565 2024-11-27T16:22:34,684 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/46297d77e113467eadbb80541de96968 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/46297d77e113467eadbb80541de96968 2024-11-27T16:22:34,685 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/c31bc6079c1942719f5173c9559b727d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/c31bc6079c1942719f5173c9559b727d 2024-11-27T16:22:34,686 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/25595b2fdf7c4e89940fefd5c454ab68 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/25595b2fdf7c4e89940fefd5c454ab68 2024-11-27T16:22:34,687 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/ec7c64ad8f6c4d3da9eed1374f97d442 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/ec7c64ad8f6c4d3da9eed1374f97d442 2024-11-27T16:22:34,688 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/0a7715e1ff124749a50d50de23f79425 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/0a7715e1ff124749a50d50de23f79425 2024-11-27T16:22:34,690 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/122852e5075045b48f4c7e2f91bc0f09 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/122852e5075045b48f4c7e2f91bc0f09 2024-11-27T16:22:34,691 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/26c8e01f0cc446bca01a9e1d133f8ab5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/26c8e01f0cc446bca01a9e1d133f8ab5 2024-11-27T16:22:34,692 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/a2885890f8fa4430a35c119b3f269105 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/a2885890f8fa4430a35c119b3f269105 2024-11-27T16:22:34,693 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/00ed872b45bc4149a9d812bc0b354826 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/00ed872b45bc4149a9d812bc0b354826 2024-11-27T16:22:34,694 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/71fee095526d41359c5eb7772cb577a7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/71fee095526d41359c5eb7772cb577a7 2024-11-27T16:22:34,695 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/49266a4e6a94462d8a3d6f04c1d2caf8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/49266a4e6a94462d8a3d6f04c1d2caf8 2024-11-27T16:22:34,697 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/3c480c5bc38e43c5a06c9e6b934e539b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/3c480c5bc38e43c5a06c9e6b934e539b 2024-11-27T16:22:34,698 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/faf017bba5f549dba89df1bec03251a4 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/faf017bba5f549dba89df1bec03251a4 2024-11-27T16:22:34,699 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/1e4b7f3ac5c74059afef8cb10b5dd765 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/1e4b7f3ac5c74059afef8cb10b5dd765 2024-11-27T16:22:34,700 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/c42697514c974c3c9041589aa162960c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/c42697514c974c3c9041589aa162960c 2024-11-27T16:22:34,701 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/793004a376e84ff8ae0a99db011ee0da to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/793004a376e84ff8ae0a99db011ee0da 2024-11-27T16:22:34,701 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/b98814ea008648fea7d05145adf66e84 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/b98814ea008648fea7d05145adf66e84 2024-11-27T16:22:34,703 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/c7b2d77f530548e992e33fe0f81f7a3c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f3449e25f1e94908b4f15b407ce83594, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/e9b048e844f3430b8e8f7f1991641724, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/8e4425f92f504effb3974aa76da9edb1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d10eb6c22482409da5ebd12481aeb49b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d37e6a157e1541e8805f98d659535b4f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/66108b2253154ade8c8f37ea74e8b530, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/e711ee5275524fd3a4130d0eb5f7c387, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/071b08a8b8ba4b59898ea9cf8a3263ec, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/41cfddc0af924381b192c213d0e0d916, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/aa172b1cac9e4abea1df47e4c7531553, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/bd6a3ea46fac493e9c948398025cfea5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/7e5b17435a3d40f0ab8403eaf4dcc9d7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/751f5f0011764921a3f4ef01ebbb52b3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/01c87d7424da4e7daf0c91810cd6307f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9956edc1acd844efaee5198751249934, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f469cec1cc5847ccb07c92db438c5be3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9063047429b1471685b053e906a4ab46, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f476703c58d542819a907410c009ab56, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/7149b2f90a3c483c8901bb68c6790d4d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/338476f0040b40398c950b3f9fd2aecc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/2ba46f7e8c9f49779557155e27ac9909, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/1f8eeb532c9a45e1998b9b49114fbca0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/fb6bce94783b4b69a643a8087cc906d5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/723cdc69c4854c08872d60ecbfcbd023, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9aa024b785044948a237cd5d82874b77, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/72a772dfc7d2448090e153dba7924ec3, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/6d00eb408a18495e8c7f30b6de5f00f7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9657c83e9f6147ebabdda904bd0bef16, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/a5ecf59ac00842d9bb5b8a69e1de1669, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/c5eba097dad54ba1a6af98e7905a7830, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/cf4c76a68dae4ecca8e957a6bc9242c3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/abbc1f5ff4174350966ead4fba040067, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f5311e2e899f4639bf18a4afbfc4a595, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d6d1bdb403e54311a167d0c9ece3070d] to archive 2024-11-27T16:22:34,704 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T16:22:34,705 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/c7b2d77f530548e992e33fe0f81f7a3c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/c7b2d77f530548e992e33fe0f81f7a3c 2024-11-27T16:22:34,706 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f3449e25f1e94908b4f15b407ce83594 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f3449e25f1e94908b4f15b407ce83594 2024-11-27T16:22:34,707 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/e9b048e844f3430b8e8f7f1991641724 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/e9b048e844f3430b8e8f7f1991641724 2024-11-27T16:22:34,708 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/8e4425f92f504effb3974aa76da9edb1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/8e4425f92f504effb3974aa76da9edb1 2024-11-27T16:22:34,709 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d10eb6c22482409da5ebd12481aeb49b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d10eb6c22482409da5ebd12481aeb49b 2024-11-27T16:22:34,709 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d37e6a157e1541e8805f98d659535b4f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d37e6a157e1541e8805f98d659535b4f 2024-11-27T16:22:34,710 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/66108b2253154ade8c8f37ea74e8b530 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/66108b2253154ade8c8f37ea74e8b530 2024-11-27T16:22:34,711 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/e711ee5275524fd3a4130d0eb5f7c387 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/e711ee5275524fd3a4130d0eb5f7c387 2024-11-27T16:22:34,712 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/071b08a8b8ba4b59898ea9cf8a3263ec to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/071b08a8b8ba4b59898ea9cf8a3263ec 2024-11-27T16:22:34,713 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/41cfddc0af924381b192c213d0e0d916 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/41cfddc0af924381b192c213d0e0d916 2024-11-27T16:22:34,713 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/aa172b1cac9e4abea1df47e4c7531553 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/aa172b1cac9e4abea1df47e4c7531553 2024-11-27T16:22:34,714 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/bd6a3ea46fac493e9c948398025cfea5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/bd6a3ea46fac493e9c948398025cfea5 2024-11-27T16:22:34,715 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/7e5b17435a3d40f0ab8403eaf4dcc9d7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/7e5b17435a3d40f0ab8403eaf4dcc9d7 2024-11-27T16:22:34,716 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/751f5f0011764921a3f4ef01ebbb52b3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/751f5f0011764921a3f4ef01ebbb52b3 2024-11-27T16:22:34,717 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/01c87d7424da4e7daf0c91810cd6307f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/01c87d7424da4e7daf0c91810cd6307f 2024-11-27T16:22:34,717 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9956edc1acd844efaee5198751249934 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9956edc1acd844efaee5198751249934 2024-11-27T16:22:34,718 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f469cec1cc5847ccb07c92db438c5be3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f469cec1cc5847ccb07c92db438c5be3 2024-11-27T16:22:34,719 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9063047429b1471685b053e906a4ab46 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9063047429b1471685b053e906a4ab46 2024-11-27T16:22:34,720 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f476703c58d542819a907410c009ab56 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f476703c58d542819a907410c009ab56 2024-11-27T16:22:34,720 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/7149b2f90a3c483c8901bb68c6790d4d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/7149b2f90a3c483c8901bb68c6790d4d 2024-11-27T16:22:34,721 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/338476f0040b40398c950b3f9fd2aecc to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/338476f0040b40398c950b3f9fd2aecc 2024-11-27T16:22:34,722 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/2ba46f7e8c9f49779557155e27ac9909 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/2ba46f7e8c9f49779557155e27ac9909 2024-11-27T16:22:34,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/1f8eeb532c9a45e1998b9b49114fbca0 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/1f8eeb532c9a45e1998b9b49114fbca0 2024-11-27T16:22:34,724 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/fb6bce94783b4b69a643a8087cc906d5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/fb6bce94783b4b69a643a8087cc906d5 2024-11-27T16:22:34,725 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/723cdc69c4854c08872d60ecbfcbd023 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/723cdc69c4854c08872d60ecbfcbd023 2024-11-27T16:22:34,725 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9aa024b785044948a237cd5d82874b77 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9aa024b785044948a237cd5d82874b77 2024-11-27T16:22:34,726 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/72a772dfc7d2448090e153dba7924ec3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/72a772dfc7d2448090e153dba7924ec3 2024-11-27T16:22:34,727 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/6d00eb408a18495e8c7f30b6de5f00f7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/6d00eb408a18495e8c7f30b6de5f00f7 2024-11-27T16:22:34,728 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9657c83e9f6147ebabdda904bd0bef16 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/9657c83e9f6147ebabdda904bd0bef16 2024-11-27T16:22:34,729 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/a5ecf59ac00842d9bb5b8a69e1de1669 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/a5ecf59ac00842d9bb5b8a69e1de1669 2024-11-27T16:22:34,730 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/c5eba097dad54ba1a6af98e7905a7830 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/c5eba097dad54ba1a6af98e7905a7830 2024-11-27T16:22:34,731 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/cf4c76a68dae4ecca8e957a6bc9242c3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/cf4c76a68dae4ecca8e957a6bc9242c3 2024-11-27T16:22:34,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/abbc1f5ff4174350966ead4fba040067 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/abbc1f5ff4174350966ead4fba040067 2024-11-27T16:22:34,733 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f5311e2e899f4639bf18a4afbfc4a595 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/f5311e2e899f4639bf18a4afbfc4a595 2024-11-27T16:22:34,734 DEBUG [StoreCloser-TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d6d1bdb403e54311a167d0c9ece3070d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/d6d1bdb403e54311a167d0c9ece3070d 2024-11-27T16:22:34,738 DEBUG 
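Note: the StoreCloser entries above move every compacted store file for families B and C from the region's data directory to the same relative location under archive/. The sketch below only illustrates that data/ to archive/ path mapping with Hadoop's FileSystem API; it is not HBase's HFileArchiver, and the root directory, region, and file names are hypothetical stand-ins.

```java
// Illustrative sketch only: mirrors the data/ -> archive/ layout seen in the log above.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
    // Rewrites <root>/data/default/<table>/<region>/<cf>/<file>
    // to       <root>/archive/data/default/<table>/<region>/<cf>/<file>
    static Path toArchivePath(Path rootDir, Path storeFile) {
        String relative = storeFile.toUri().getPath()
                .substring(rootDir.toUri().getPath().length() + 1); // e.g. data/default/T/region/B/file
        return new Path(new Path(rootDir, "archive"), relative);
    }

    public static void main(String[] args) throws Exception {
        // Hypothetical values; the real root dir and file names come from the cluster under test.
        Path rootDir = new Path("hdfs://localhost:34065/user/jenkins/test-data/EXAMPLE");
        Path storeFile = new Path(rootDir, "data/default/TestAcidGuarantees/REGION/B/STOREFILE");

        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(rootDir.toString()), conf);

        Path archived = toArchivePath(rootDir, storeFile);
        fs.mkdirs(archived.getParent());                 // ensure the mirrored directory exists
        boolean moved = fs.rename(storeFile, archived);  // move, preserving the relative layout
        System.out.println("archived=" + moved + " -> " + archived);
    }
}
```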
[RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/recovered.edits/498.seqid, newMaxSeqId=498, maxSeqId=1 2024-11-27T16:22:34,739 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574. 2024-11-27T16:22:34,739 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1635): Region close journal for b7c24f821c64d1ed1608bef04711b574: 2024-11-27T16:22:34,740 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(170): Closed b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:34,741 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=b7c24f821c64d1ed1608bef04711b574, regionState=CLOSED 2024-11-27T16:22:34,743 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-27T16:22:34,743 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; CloseRegionProcedure b7c24f821c64d1ed1608bef04711b574, server=7b191dec6496,44169,1732724452967 in 769 msec 2024-11-27T16:22:34,744 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=89, resume processing ppid=88 2024-11-27T16:22:34,744 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, ppid=88, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b7c24f821c64d1ed1608bef04711b574, UNASSIGN in 773 msec 2024-11-27T16:22:34,745 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-27T16:22:34,745 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 775 msec 2024-11-27T16:22:34,746 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724554746"}]},"ts":"1732724554746"} 2024-11-27T16:22:34,747 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-27T16:22:34,750 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-27T16:22:34,751 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 789 msec 2024-11-27T16:22:35,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-27T16:22:35,069 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-27T16:22:35,069 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-27T16:22:35,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:35,071 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=91, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:35,071 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=91, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:35,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-27T16:22:35,074 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:35,075 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/recovered.edits] 2024-11-27T16:22:35,078 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/da910625906c4fec89e8e3f421346364 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/da910625906c4fec89e8e3f421346364 2024-11-27T16:22:35,079 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/e22fedd60ba14d30873a30a56c0ef671 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/A/e22fedd60ba14d30873a30a56c0ef671 2024-11-27T16:22:35,081 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/12ad51ba49c5468eb5ef31205456550c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/12ad51ba49c5468eb5ef31205456550c 2024-11-27T16:22:35,082 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/4e18dbeac2e44859ab8851a2bb62d9e5 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/B/4e18dbeac2e44859ab8851a2bb62d9e5 2024-11-27T16:22:35,084 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/3e4bf9c81d8e494f9eae1bfb9bb688e8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/3e4bf9c81d8e494f9eae1bfb9bb688e8 2024-11-27T16:22:35,086 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/b2438964b65e453a8f6f905b08478b95 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/C/b2438964b65e453a8f6f905b08478b95 2024-11-27T16:22:35,089 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/recovered.edits/498.seqid to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574/recovered.edits/498.seqid 2024-11-27T16:22:35,089 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/b7c24f821c64d1ed1608bef04711b574 2024-11-27T16:22:35,089 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-27T16:22:35,091 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=91, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:35,094 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-27T16:22:35,097 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-27T16:22:35,098 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=91, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:35,098 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-27T16:22:35,098 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732724555098"}]},"ts":"9223372036854775807"} 2024-11-27T16:22:35,099 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-27T16:22:35,099 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => b7c24f821c64d1ed1608bef04711b574, NAME => 'TestAcidGuarantees,,1732724523373.b7c24f821c64d1ed1608bef04711b574.', STARTKEY => '', ENDKEY => ''}] 2024-11-27T16:22:35,100 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-27T16:22:35,100 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732724555100"}]},"ts":"9223372036854775807"} 2024-11-27T16:22:35,102 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-27T16:22:35,105 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=91, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:35,105 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 36 msec 2024-11-27T16:22:35,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-27T16:22:35,173 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-27T16:22:35,183 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=237 (was 237), OpenFileDescriptor=453 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=650 (was 404) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4013 (was 5101) 2024-11-27T16:22:35,192 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=237, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=650, ProcessCount=11, AvailableMemoryMB=4013 2024-11-27T16:22:35,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
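Note: the entries above show the table being taken down in two client-driven steps, DISABLE (procId 87) followed by DELETE (procId 91). A minimal sketch of the equivalent HBase 2.x client calls is below, assuming a reachable cluster and the client library on the classpath; the test itself issues these through its own harness rather than this standalone program.

```java
// Minimal sketch of the disable-then-delete sequence recorded above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            if (admin.tableExists(table)) {
                admin.disableTable(table); // DisableTableProcedure: regions closed, table state=DISABLED
                admin.deleteTable(table);  // DeleteTableProcedure: region dirs archived, META rows removed
            }
        }
    }
}
```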
2024-11-27T16:22:35,194 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T16:22:35,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:35,195 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T16:22:35,196 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 92 2024-11-27T16:22:35,196 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:35,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-27T16:22:35,196 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T16:22:35,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742199_1375 (size=960) 2024-11-27T16:22:35,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-27T16:22:35,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-27T16:22:35,604 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59 2024-11-27T16:22:35,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742200_1376 (size=53) 2024-11-27T16:22:35,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-27T16:22:36,010 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:22:36,010 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing a4d3cff76c8a3133e00d8e6d8859dc7d, disabling compactions & flushes 2024-11-27T16:22:36,010 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:36,010 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:36,010 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. after waiting 0 ms 2024-11-27T16:22:36,010 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:36,010 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
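[editor's note] The create request logged above (master.HMaster$4) describes table 'TestAcidGuarantees' with families A, B and C, VERSIONS => '1', 64 KB blocks, and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A hedged sketch of a client-side call that would produce that descriptor; connection setup is assumed and this mirrors the logged descriptor rather than the test's own code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestAcidGuarantees {
        static void create(Connection conn) throws Exception {
            TableDescriptorBuilder td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // table-level metadata seen in the logged descriptor
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
            for (String family : new String[] {"A", "B", "C"}) {
                td.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)        // VERSIONS => '1'
                    .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536'
                    .build());
            }
            try (Admin admin = conn.getAdmin()) {
                admin.createTable(td.build());
            }
        }
    }

Note that 'hbase.store.file-tracker.impl' => 'DEFAULT', visible in the later region-init line, is filled in by the master rather than supplied by the client.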
2024-11-27T16:22:36,010 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:36,011 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T16:22:36,012 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732724556011"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732724556011"}]},"ts":"1732724556011"} 2024-11-27T16:22:36,013 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-27T16:22:36,013 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T16:22:36,014 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724556013"}]},"ts":"1732724556013"} 2024-11-27T16:22:36,014 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-27T16:22:36,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a4d3cff76c8a3133e00d8e6d8859dc7d, ASSIGN}] 2024-11-27T16:22:36,019 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a4d3cff76c8a3133e00d8e6d8859dc7d, ASSIGN 2024-11-27T16:22:36,020 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a4d3cff76c8a3133e00d8e6d8859dc7d, ASSIGN; state=OFFLINE, location=7b191dec6496,44169,1732724452967; forceNewPlan=false, retain=false 2024-11-27T16:22:36,170 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=a4d3cff76c8a3133e00d8e6d8859dc7d, regionState=OPENING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:22:36,172 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; OpenRegionProcedure a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:22:36,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-27T16:22:36,323 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:36,325 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
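[editor's note] The sequence above is the server-side CreateTableProcedure (pid=92) walking through ADD_TO_META and ASSIGN_REGIONS, spawning a TransitRegionStateProcedure and an OpenRegionProcedure, while the client repeatedly shows up as "Checking to see if procedure is done pid=92". That polling is the client's table future waiting on the master. A minimal sketch of the equivalent wait using the async flavour of the Admin API (the single-argument createTableAsync available in recent 2.x; the timeout value is arbitrary):

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    class CreateAndWaitSketch {
        static void createAndWait(Admin admin, TableDescriptor desc) throws Exception {
            Future<Void> done = admin.createTableAsync(desc);
            // Blocks until the master reports the create procedure finished,
            // i.e. the "Operation: CREATE ... procId: 92 completed" line
            // that appears further down in this log.
            done.get(2, TimeUnit.MINUTES);
        }
    }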
2024-11-27T16:22:36,326 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7285): Opening region: {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} 2024-11-27T16:22:36,326 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:36,326 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:22:36,326 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7327): checking encryption for a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:36,326 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7330): checking classloading for a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:36,327 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:36,328 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:22:36,328 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4d3cff76c8a3133e00d8e6d8859dc7d columnFamilyName A 2024-11-27T16:22:36,328 DEBUG [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:36,329 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.HStore(327): Store=a4d3cff76c8a3133e00d8e6d8859dc7d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:22:36,329 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:36,330 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:22:36,330 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4d3cff76c8a3133e00d8e6d8859dc7d columnFamilyName B 2024-11-27T16:22:36,330 DEBUG [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:36,330 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.HStore(327): Store=a4d3cff76c8a3133e00d8e6d8859dc7d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:22:36,330 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:36,331 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:22:36,331 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4d3cff76c8a3133e00d8e6d8859dc7d columnFamilyName C 2024-11-27T16:22:36,331 DEBUG [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:36,331 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.HStore(327): Store=a4d3cff76c8a3133e00d8e6d8859dc7d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:22:36,332 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:36,332 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:36,332 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:36,333 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T16:22:36,334 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1085): writing seq id for a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:36,336 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T16:22:36,336 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1102): Opened a4d3cff76c8a3133e00d8e6d8859dc7d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71965451, jitterRate=0.07236878573894501}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T16:22:36,337 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1001): Region open journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:36,338 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., pid=94, masterSystemTime=1732724556323 2024-11-27T16:22:36,339 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:36,339 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
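[editor's note] The store openings above show each family (A, B, C) backed by a CompactingMemStore with compactor=BASIC and a 2.00 MB in-memory flush threshold, driven by the table-level 'hbase.hregion.compacting.memstore.type' attribute from the descriptor. For reference, the same policy can also be requested per column family through the descriptor builder; this is shown only as the equivalent API, not as something this particular test does:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class InMemoryCompactionSketch {
        // Builds a family descriptor whose memstore uses BASIC in-memory
        // compaction, the per-family counterpart of the table attribute.
        static ColumnFamilyDescriptor basicCompactingFamily(String name) {
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build();
        }
    }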
2024-11-27T16:22:36,339 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=a4d3cff76c8a3133e00d8e6d8859dc7d, regionState=OPEN, openSeqNum=2, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:22:36,341 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-27T16:22:36,341 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; OpenRegionProcedure a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 in 169 msec 2024-11-27T16:22:36,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-11-27T16:22:36,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a4d3cff76c8a3133e00d8e6d8859dc7d, ASSIGN in 323 msec 2024-11-27T16:22:36,342 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T16:22:36,343 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724556342"}]},"ts":"1732724556342"} 2024-11-27T16:22:36,343 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-27T16:22:36,345 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T16:22:36,346 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1520 sec 2024-11-27T16:22:37,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-27T16:22:37,301 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 92 completed 2024-11-27T16:22:37,302 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2ecf33fc to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47290c4 2024-11-27T16:22:37,306 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7267b857, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:37,307 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:37,308 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33038, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:37,309 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T16:22:37,310 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52002, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T16:22:37,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-27T16:22:37,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T16:22:37,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-27T16:22:37,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742201_1377 (size=996) 2024-11-27T16:22:37,722 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-27T16:22:37,722 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-27T16:22:37,724 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T16:22:37,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a4d3cff76c8a3133e00d8e6d8859dc7d, REOPEN/MOVE}] 2024-11-27T16:22:37,727 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a4d3cff76c8a3133e00d8e6d8859dc7d, REOPEN/MOVE 2024-11-27T16:22:37,727 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=a4d3cff76c8a3133e00d8e6d8859dc7d, regionState=CLOSING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:22:37,728 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41377 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=CLOSING, location=7b191dec6496,44169,1732724452967, table=TestAcidGuarantees, region=a4d3cff76c8a3133e00d8e6d8859dc7d. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-27T16:22:37,728 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T16:22:37,728 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; CloseRegionProcedure a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:22:37,879 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:37,880 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(124): Close a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:37,880 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T16:22:37,880 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1681): Closing a4d3cff76c8a3133e00d8e6d8859dc7d, disabling compactions & flushes 2024-11-27T16:22:37,880 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:37,880 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:37,880 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
after waiting 0 ms 2024-11-27T16:22:37,880 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:37,884 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-27T16:22:37,884 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:37,884 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1635): Region close journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:37,884 WARN [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionServer(3786): Not adding moved region record: a4d3cff76c8a3133e00d8e6d8859dc7d to self. 2024-11-27T16:22:37,885 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(170): Closed a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:37,886 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=a4d3cff76c8a3133e00d8e6d8859dc7d, regionState=CLOSED 2024-11-27T16:22:37,887 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-27T16:22:37,888 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; CloseRegionProcedure a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 in 159 msec 2024-11-27T16:22:37,888 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a4d3cff76c8a3133e00d8e6d8859dc7d, REOPEN/MOVE; state=CLOSED, location=7b191dec6496,44169,1732724452967; forceNewPlan=false, retain=true 2024-11-27T16:22:38,038 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=a4d3cff76c8a3133e00d8e6d8859dc7d, regionState=OPENING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=97, state=RUNNABLE; OpenRegionProcedure a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:22:38,191 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,194 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
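[editor's note] The modify request logged earlier (master.HMaster$14) changes only family 'A': it gains IS_MOB => 'true' and MOB_THRESHOLD => '4', so cells larger than 4 bytes will be written as MOB files, and the ModifyTableProcedure then closes and reopens the region as seen above. A hedged sketch of the matching client call; the Admin handle is assumed and the code mirrors the logged descriptor change rather than the test's own helper:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class EnableMobOnFamilyA {
        static void enableMob(Admin admin) throws Exception {
            TableName name = TableName.valueOf("TestAcidGuarantees");
            TableDescriptor current = admin.getDescriptor(name);
            ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
            ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
                .setMobEnabled(true)   // IS_MOB => 'true'
                .setMobThreshold(4L)   // MOB_THRESHOLD => '4' bytes
                .build();
            admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
                .modifyColumnFamily(mobA)
                .build());
        }
    }

Because the table attributes changed, the master schedules a ReopenTableRegionsProcedure (pid=96), which is why the region goes through CLOSE followed by the reopen visible around this point in the log.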
2024-11-27T16:22:38,194 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7285): Opening region: {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} 2024-11-27T16:22:38,194 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:38,194 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:22:38,195 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7327): checking encryption for a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:38,195 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7330): checking classloading for a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:38,196 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:38,197 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:22:38,197 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4d3cff76c8a3133e00d8e6d8859dc7d columnFamilyName A 2024-11-27T16:22:38,198 DEBUG [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:38,198 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.HStore(327): Store=a4d3cff76c8a3133e00d8e6d8859dc7d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:22:38,199 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:38,199 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:22:38,200 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4d3cff76c8a3133e00d8e6d8859dc7d columnFamilyName B 2024-11-27T16:22:38,200 DEBUG [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:38,200 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.HStore(327): Store=a4d3cff76c8a3133e00d8e6d8859dc7d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:22:38,200 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:38,201 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:22:38,201 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a4d3cff76c8a3133e00d8e6d8859dc7d columnFamilyName C 2024-11-27T16:22:38,201 DEBUG [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:38,201 INFO [StoreOpener-a4d3cff76c8a3133e00d8e6d8859dc7d-1 {}] regionserver.HStore(327): Store=a4d3cff76c8a3133e00d8e6d8859dc7d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:22:38,201 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:38,202 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:38,202 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:38,203 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T16:22:38,204 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1085): writing seq id for a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:38,205 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1102): Opened a4d3cff76c8a3133e00d8e6d8859dc7d; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73781496, jitterRate=0.09942996501922607}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T16:22:38,206 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1001): Region open journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:38,206 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., pid=99, masterSystemTime=1732724558191 2024-11-27T16:22:38,207 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:38,207 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
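[editor's note] At this point the REOPEN/MOVE has brought the region back up on the same server (7b191dec6496,44169) with next sequenceid=5. A small illustrative sketch of how a client could confirm the post-reopen location, bypassing its cached region location; this is not part of the test flow shown in the log:

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    class LocateRegionSketch {
        static void printLocation(Connection conn) throws Exception {
            try (RegionLocator locator =
                     conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
                // reload=true forces a fresh lookup after the reopen; the row
                // key matches the test rows seen later in this log.
                HRegionLocation loc =
                    locator.getRegionLocation(Bytes.toBytes("test_row_0"), true);
                System.out.println(loc.getServerName() + " hosts "
                    + loc.getRegion().getRegionNameAsString());
            }
        }
    }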
2024-11-27T16:22:38,208 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=a4d3cff76c8a3133e00d8e6d8859dc7d, regionState=OPEN, openSeqNum=5, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,209 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=97 2024-11-27T16:22:38,209 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=97, state=SUCCESS; OpenRegionProcedure a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 in 170 msec 2024-11-27T16:22:38,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-11-27T16:22:38,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a4d3cff76c8a3133e00d8e6d8859dc7d, REOPEN/MOVE in 483 msec 2024-11-27T16:22:38,212 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-27T16:22:38,212 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 487 msec 2024-11-27T16:22:38,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 900 msec 2024-11-27T16:22:38,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-27T16:22:38,215 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c8cc27b to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c68919e 2024-11-27T16:22:38,222 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63822144, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:38,223 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x081cac4f to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@601038b3 2024-11-27T16:22:38,226 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@126abdf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:38,227 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64a04d7a to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@59434fd 2024-11-27T16:22:38,229 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42d6bca6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:38,230 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3268230a to 
127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@167fda66 2024-11-27T16:22:38,232 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61bb7783, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:38,233 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d7912a0 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bd5983 2024-11-27T16:22:38,236 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f0031d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:38,237 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b7324d5 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5434c92 2024-11-27T16:22:38,240 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53c186a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:38,240 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d930fb1 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@52abed4d 2024-11-27T16:22:38,246 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d80c576, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:38,246 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x114e6211 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c0234f0 2024-11-27T16:22:38,249 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17a2e973, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:38,250 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x191ae36a to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14b2e10d 2024-11-27T16:22:38,252 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@145b6b99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:38,253 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(149): Connect 0x133cc1f0 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1085e013 2024-11-27T16:22:38,255 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fcd5639, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:22:38,257 DEBUG [hconnection-0x2270e746-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:38,258 DEBUG [hconnection-0x5d05dd49-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:38,258 DEBUG [hconnection-0x63e810b9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:38,259 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33040, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:38,259 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33056, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:38,259 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:38,259 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:38,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees 2024-11-27T16:22:38,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-27T16:22:38,261 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:38,262 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:38,262 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:38,264 DEBUG [hconnection-0x5c5fb5df-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:38,265 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33082, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:38,265 DEBUG [hconnection-0x7396aebe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-27T16:22:38,266 DEBUG [hconnection-0x77feb63b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:38,266 DEBUG [hconnection-0x3d051bc1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:38,266 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33092, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:38,267 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33102, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:38,267 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33110, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:38,267 DEBUG [hconnection-0xcc57515-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:38,267 DEBUG [hconnection-0x7096ae59-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:38,268 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33116, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:38,268 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33122, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:38,271 DEBUG [hconnection-0x2ba5b551-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:22:38,272 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33132, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:22:38,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:38,273 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:22:38,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:38,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:38,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:38,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:38,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:38,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:38,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724618292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724618296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724618295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724618296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724618297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,304 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411274ff8ccd743f445e8aa8dc082c44e4bfe_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724558265/Put/seqid=0 2024-11-27T16:22:38,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742202_1378 (size=14594) 2024-11-27T16:22:38,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is 
done pid=100 2024-11-27T16:22:38,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724618399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724618403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724618403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724618403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724618403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,413 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-27T16:22:38,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:38,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:38,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:38,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:38,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:38,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:38,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-27T16:22:38,565 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,565 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-27T16:22:38,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:38,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:38,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:38,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:38,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:38,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:38,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724618601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724618607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724618608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724618608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724618612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,710 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:38,714 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411274ff8ccd743f445e8aa8dc082c44e4bfe_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411274ff8ccd743f445e8aa8dc082c44e4bfe_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:38,715 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/2d050878dd394b9e8a641bddca003329, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:38,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/2d050878dd394b9e8a641bddca003329 is 175, key is test_row_0/A:col10/1732724558265/Put/seqid=0 2024-11-27T16:22:38,718 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,718 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-27T16:22:38,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:38,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
as already flushing 2024-11-27T16:22:38,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:38,719 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:38,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:38,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:38,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742203_1379 (size=39549) 2024-11-27T16:22:38,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-27T16:22:38,871 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-27T16:22:38,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:38,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:38,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:38,872 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:38,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:38,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:38,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724618904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724618910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724618916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724618916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:38,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:38,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724618917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:39,024 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:39,025 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-27T16:22:39,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:39,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:39,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:39,025 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:39,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:39,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:39,125 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/2d050878dd394b9e8a641bddca003329 2024-11-27T16:22:39,167 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/a6e41b72aab143e08917e16b4b24f55d is 50, key is test_row_0/B:col10/1732724558265/Put/seqid=0 2024-11-27T16:22:39,178 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:39,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-27T16:22:39,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:39,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:39,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:39,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:39,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:39,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:39,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742204_1380 (size=12001)
2024-11-27T16:22:39,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/a6e41b72aab143e08917e16b4b24f55d
2024-11-27T16:22:39,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/9a4aee30282444089190ef8701f6d740 is 50, key is test_row_0/C:col10/1732724558265/Put/seqid=0
2024-11-27T16:22:39,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742205_1381 (size=12001)
2024-11-27T16:22:39,332 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967
2024-11-27T16:22:39,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101
2024-11-27T16:22:39,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.
2024-11-27T16:22:39,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing
2024-11-27T16:22:39,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.
2024-11-27T16:22:39,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101
java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:39,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101
java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:39,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=101
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:39,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-27T16:22:39,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:39,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724619411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:39,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:39,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724619419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:39,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:39,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724619423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:39,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:39,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724619426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:39,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:39,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724619427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:39,484 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:39,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-27T16:22:39,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:39,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:39,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:39,485 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:39,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:39,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:39,637 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:39,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-27T16:22:39,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:39,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:39,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:39,638 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:39,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:39,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:39,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/9a4aee30282444089190ef8701f6d740
2024-11-27T16:22:39,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/2d050878dd394b9e8a641bddca003329 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/2d050878dd394b9e8a641bddca003329
2024-11-27T16:22:39,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/2d050878dd394b9e8a641bddca003329, entries=200, sequenceid=17, filesize=38.6 K
2024-11-27T16:22:39,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/a6e41b72aab143e08917e16b4b24f55d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/a6e41b72aab143e08917e16b4b24f55d
2024-11-27T16:22:39,693 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/a6e41b72aab143e08917e16b4b24f55d, entries=150, sequenceid=17, filesize=11.7 K
2024-11-27T16:22:39,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/9a4aee30282444089190ef8701f6d740 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/9a4aee30282444089190ef8701f6d740
2024-11-27T16:22:39,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/9a4aee30282444089190ef8701f6d740, entries=150, sequenceid=17, filesize=11.7 K
2024-11-27T16:22:39,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for a4d3cff76c8a3133e00d8e6d8859dc7d in 1425ms, sequenceid=17, compaction requested=false
2024-11-27T16:22:39,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d:
2024-11-27T16:22:39,790 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967
2024-11-27T16:22:39,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101
2024-11-27T16:22:39,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.
2024-11-27T16:22:39,791 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-11-27T16:22:39,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A
2024-11-27T16:22:39,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:22:39,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B
2024-11-27T16:22:39,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:22:39,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C
2024-11-27T16:22:39,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:22:39,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411278328906cfffc4e9f8559aefbb3432342_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724558293/Put/seqid=0
2024-11-27T16:22:39,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742206_1382 (size=12154)
2024-11-27T16:22:40,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:22:40,218 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411278328906cfffc4e9f8559aefbb3432342_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278328906cfffc4e9f8559aefbb3432342_a4d3cff76c8a3133e00d8e6d8859dc7d
2024-11-27T16:22:40,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/59677fd4c6f14020ac185d8413f48aab, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d]
2024-11-27T16:22:40,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/59677fd4c6f14020ac185d8413f48aab is 175, key is test_row_0/A:col10/1732724558293/Put/seqid=0
2024-11-27T16:22:40,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742207_1383 (size=30955)
2024-11-27T16:22:40,267 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-11-27T16:22:40,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100
2024-11-27T16:22:40,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d
2024-11-27T16:22:40,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing
2024-11-27T16:22:40,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:22:40,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724620434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:22:40,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:40,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724620435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:40,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:40,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724620435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:40,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:40,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724620436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:40,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:40,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724620438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:40,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:40,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724620541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:40,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:40,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724620542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:40,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:40,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724620542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:40,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:40,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724620543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:40,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:40,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724620543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:40,625 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/59677fd4c6f14020ac185d8413f48aab 2024-11-27T16:22:40,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/527027039be241eeb55778bb597678c5 is 50, key is test_row_0/B:col10/1732724558293/Put/seqid=0 2024-11-27T16:22:40,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742208_1384 (size=12001) 2024-11-27T16:22:40,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:40,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724620747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:40,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:40,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724620748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:40,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:40,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724620748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:40,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:40,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724620749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:40,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:40,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724620750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724621051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724621052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,056 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/527027039be241eeb55778bb597678c5 2024-11-27T16:22:41,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724621054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724621054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724621055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/74990d0aa6034ec6b80d8011b8a654ad is 50, key is test_row_0/C:col10/1732724558293/Put/seqid=0 2024-11-27T16:22:41,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742209_1385 (size=12001) 2024-11-27T16:22:41,469 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/74990d0aa6034ec6b80d8011b8a654ad 2024-11-27T16:22:41,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/59677fd4c6f14020ac185d8413f48aab as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/59677fd4c6f14020ac185d8413f48aab 2024-11-27T16:22:41,486 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/59677fd4c6f14020ac185d8413f48aab, entries=150, sequenceid=41, filesize=30.2 K 2024-11-27T16:22:41,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/527027039be241eeb55778bb597678c5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/527027039be241eeb55778bb597678c5 2024-11-27T16:22:41,494 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 
{event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/527027039be241eeb55778bb597678c5, entries=150, sequenceid=41, filesize=11.7 K 2024-11-27T16:22:41,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/74990d0aa6034ec6b80d8011b8a654ad as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/74990d0aa6034ec6b80d8011b8a654ad 2024-11-27T16:22:41,499 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/74990d0aa6034ec6b80d8011b8a654ad, entries=150, sequenceid=41, filesize=11.7 K 2024-11-27T16:22:41,500 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for a4d3cff76c8a3133e00d8e6d8859dc7d in 1709ms, sequenceid=41, compaction requested=false 2024-11-27T16:22:41,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:41,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
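The repeated RegionTooBusyException entries above are raised by HRegion.checkResources(), which rejects incoming mutations while the region's memstore sits above its blocking limit (the region flush size multiplied by hbase.hregion.memstore.block.multiplier; this test run is evidently tuned so that the limit works out to 512.0 K). Writes are accepted again once flushes such as the one that just completed free enough memstore space. The sketch below is a minimal illustration only: it assumes a standard HBase client on the classpath and reuses the table, row, family, and qualifier names visible in this log (TestAcidGuarantees, test_row_0, A:col10); the class name, cell value, attempt count, and backoff are made up for the example and are not taken from this run, and the stock client already retries this exception internally up to hbase.client.retries.number.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyRetryExample {                      // hypothetical class name, not part of the test
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family, and qualifier as seen in the flush output above (test_row_0/A:col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value")); // illustrative value

      int maxAttempts = 5;   // illustrative; the client's own retries are governed by hbase.client.retries.number
      long backoffMs = 100L; // illustrative starting backoff
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);    // succeeds once the memstore drops back under the blocking limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= maxAttempts) {
            throw e;         // give up after a bounded number of attempts
          }
          Thread.sleep(backoffMs); // give the in-flight flush time to free memstore space
          backoffMs *= 2;
        }
      }
    }
  }
}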
2024-11-27T16:22:41,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-27T16:22:41,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=101 2024-11-27T16:22:41,503 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-11-27T16:22:41,503 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2390 sec 2024-11-27T16:22:41,504 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees in 3.2440 sec 2024-11-27T16:22:41,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-27T16:22:41,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:41,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:41,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:41,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:41,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:41,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:41,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:41,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411279e65c61160fd4387bcc98b3f8e9b8cdf_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724560435/Put/seqid=0 2024-11-27T16:22:41,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742210_1386 (size=14594) 2024-11-27T16:22:41,651 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:41,655 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411279e65c61160fd4387bcc98b3f8e9b8cdf_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279e65c61160fd4387bcc98b3f8e9b8cdf_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:41,656 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/72b30d8d4d1449bf9362760aa80d3b21, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:41,657 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/72b30d8d4d1449bf9362760aa80d3b21 is 175, key is test_row_0/A:col10/1732724560435/Put/seqid=0 2024-11-27T16:22:41,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742211_1387 (size=39549) 2024-11-27T16:22:41,699 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/72b30d8d4d1449bf9362760aa80d3b21 2024-11-27T16:22:41,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724621674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,718 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/0cfb2362836d4a918cc402554c49f548 is 50, key is test_row_0/B:col10/1732724560435/Put/seqid=0 2024-11-27T16:22:41,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724621691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724621700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724621700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724621707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742212_1388 (size=12001) 2024-11-27T16:22:41,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724621802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724621825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724621829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724621830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:41,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:41,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724621842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724622011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724622032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724622037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724622038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724622056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,185 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/0cfb2362836d4a918cc402554c49f548 2024-11-27T16:22:42,208 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/07a4349ac0b04da681563c633f801a4a is 50, key is test_row_0/C:col10/1732724560435/Put/seqid=0 2024-11-27T16:22:42,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742213_1389 (size=12001) 2024-11-27T16:22:42,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/07a4349ac0b04da681563c633f801a4a 2024-11-27T16:22:42,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/72b30d8d4d1449bf9362760aa80d3b21 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/72b30d8d4d1449bf9362760aa80d3b21 2024-11-27T16:22:42,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/72b30d8d4d1449bf9362760aa80d3b21, entries=200, sequenceid=54, filesize=38.6 K 2024-11-27T16:22:42,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/0cfb2362836d4a918cc402554c49f548 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/0cfb2362836d4a918cc402554c49f548 2024-11-27T16:22:42,273 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/0cfb2362836d4a918cc402554c49f548, entries=150, sequenceid=54, filesize=11.7 K 2024-11-27T16:22:42,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/07a4349ac0b04da681563c633f801a4a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/07a4349ac0b04da681563c633f801a4a 2024-11-27T16:22:42,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/07a4349ac0b04da681563c633f801a4a, entries=150, sequenceid=54, filesize=11.7 K 2024-11-27T16:22:42,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for a4d3cff76c8a3133e00d8e6d8859dc7d in 715ms, sequenceid=54, compaction requested=true 2024-11-27T16:22:42,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:42,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:42,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:42,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:42,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:42,278 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:42,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:42,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:42,278 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:42,279 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110053 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:42,279 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:42,280 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/A is initiating minor compaction (all files) 2024-11-27T16:22:42,280 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/B is initiating minor compaction (all files) 2024-11-27T16:22:42,280 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/B in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:42,280 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/A in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:42,280 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/a6e41b72aab143e08917e16b4b24f55d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/527027039be241eeb55778bb597678c5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/0cfb2362836d4a918cc402554c49f548] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=35.2 K 2024-11-27T16:22:42,280 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/2d050878dd394b9e8a641bddca003329, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/59677fd4c6f14020ac185d8413f48aab, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/72b30d8d4d1449bf9362760aa80d3b21] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=107.5 K 2024-11-27T16:22:42,280 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:42,280 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/2d050878dd394b9e8a641bddca003329, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/59677fd4c6f14020ac185d8413f48aab, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/72b30d8d4d1449bf9362760aa80d3b21] 2024-11-27T16:22:42,280 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a6e41b72aab143e08917e16b4b24f55d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732724558265 2024-11-27T16:22:42,281 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d050878dd394b9e8a641bddca003329, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732724558265 2024-11-27T16:22:42,281 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 527027039be241eeb55778bb597678c5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732724558289 2024-11-27T16:22:42,282 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59677fd4c6f14020ac185d8413f48aab, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732724558289 2024-11-27T16:22:42,282 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0cfb2362836d4a918cc402554c49f548, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732724560432 2024-11-27T16:22:42,282 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72b30d8d4d1449bf9362760aa80d3b21, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732724560432 2024-11-27T16:22:42,326 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#B#compaction#333 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:42,327 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/193da816a3d44c58bd88e88cfa947f9f is 50, key is test_row_0/B:col10/1732724560435/Put/seqid=0 2024-11-27T16:22:42,332 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:42,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:42,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:22:42,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:42,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:42,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:42,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:42,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:42,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:42,353 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411275f33ae1c40c244f991a55343779a18c1_a4d3cff76c8a3133e00d8e6d8859dc7d store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:42,355 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411275f33ae1c40c244f991a55343779a18c1_a4d3cff76c8a3133e00d8e6d8859dc7d, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:42,355 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275f33ae1c40c244f991a55343779a18c1_a4d3cff76c8a3133e00d8e6d8859dc7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:42,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-27T16:22:42,367 INFO [Thread-1701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-11-27T16:22:42,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): 
Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:42,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-11-27T16:22:42,371 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:42,372 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:42,372 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:42,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-27T16:22:42,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742214_1390 (size=12104) 2024-11-27T16:22:42,391 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/193da816a3d44c58bd88e88cfa947f9f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/193da816a3d44c58bd88e88cfa947f9f 2024-11-27T16:22:42,396 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/B of a4d3cff76c8a3133e00d8e6d8859dc7d into 193da816a3d44c58bd88e88cfa947f9f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:22:42,396 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:42,396 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/B, priority=13, startTime=1732724562278; duration=0sec 2024-11-27T16:22:42,396 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:42,396 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:B 2024-11-27T16:22:42,396 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:42,397 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:42,397 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/C is initiating minor compaction (all files) 2024-11-27T16:22:42,397 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/C in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:42,397 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/9a4aee30282444089190ef8701f6d740, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/74990d0aa6034ec6b80d8011b8a654ad, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/07a4349ac0b04da681563c633f801a4a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=35.2 K 2024-11-27T16:22:42,399 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a4aee30282444089190ef8701f6d740, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732724558265 2024-11-27T16:22:42,400 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 74990d0aa6034ec6b80d8011b8a654ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732724558289 2024-11-27T16:22:42,401 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 07a4349ac0b04da681563c633f801a4a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732724560432 2024-11-27T16:22:42,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411271b33f643cfdc4e80a5e6fac9e25388d0_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724561697/Put/seqid=0 2024-11-27T16:22:42,407 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724622370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724622393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,425 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#C#compaction#336 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:42,426 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/7edda95080ab4e218a71dbabbcd39787 is 50, key is test_row_0/C:col10/1732724560435/Put/seqid=0 2024-11-27T16:22:42,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724622398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724622399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724622401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-27T16:22:42,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742215_1391 (size=4469) 2024-11-27T16:22:42,480 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#A#compaction#334 average throughput is 0.17 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:42,481 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/56af902700004c05bcb5516401f36fb1 is 175, key is test_row_0/A:col10/1732724560435/Put/seqid=0 2024-11-27T16:22:42,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742216_1392 (size=14594) 2024-11-27T16:22:42,510 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:42,515 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411271b33f643cfdc4e80a5e6fac9e25388d0_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411271b33f643cfdc4e80a5e6fac9e25388d0_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:42,516 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/1a8deb7377194e28a2fb426e3549606a, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:42,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/1a8deb7377194e28a2fb426e3549606a is 175, key is test_row_0/A:col10/1732724561697/Put/seqid=0 2024-11-27T16:22:42,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724622514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,528 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,529 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-27T16:22:42,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:42,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:42,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:42,529 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:42,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:42,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742217_1393 (size=12104) 2024-11-27T16:22:42,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724622533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724622534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724622534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742218_1394 (size=31058) 2024-11-27T16:22:42,581 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/56af902700004c05bcb5516401f36fb1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/56af902700004c05bcb5516401f36fb1 2024-11-27T16:22:42,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742219_1395 (size=39549) 2024-11-27T16:22:42,584 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/1a8deb7377194e28a2fb426e3549606a 2024-11-27T16:22:42,591 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/A of a4d3cff76c8a3133e00d8e6d8859dc7d into 56af902700004c05bcb5516401f36fb1(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:22:42,591 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:42,591 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/A, priority=13, startTime=1732724562278; duration=0sec 2024-11-27T16:22:42,591 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:42,591 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:A 2024-11-27T16:22:42,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/f4ce771570cc4e288aa047d8eefa57d7 is 50, key is test_row_0/B:col10/1732724561697/Put/seqid=0 2024-11-27T16:22:42,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742220_1396 (size=12001) 2024-11-27T16:22:42,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/f4ce771570cc4e288aa047d8eefa57d7 2024-11-27T16:22:42,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/baf4d3d663f14f2bba64083142582e86 is 50, key is test_row_0/C:col10/1732724561697/Put/seqid=0 2024-11-27T16:22:42,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-27T16:22:42,683 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,684 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-27T16:22:42,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:42,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:42,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:42,685 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:42,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:42,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:42,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742221_1397 (size=12001) 2024-11-27T16:22:42,728 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/baf4d3d663f14f2bba64083142582e86 2024-11-27T16:22:42,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/1a8deb7377194e28a2fb426e3549606a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1a8deb7377194e28a2fb426e3549606a 2024-11-27T16:22:42,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,740 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1a8deb7377194e28a2fb426e3549606a, entries=200, sequenceid=79, filesize=38.6 K 2024-11-27T16:22:42,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724622724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/f4ce771570cc4e288aa047d8eefa57d7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/f4ce771570cc4e288aa047d8eefa57d7 2024-11-27T16:22:42,752 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/f4ce771570cc4e288aa047d8eefa57d7, entries=150, sequenceid=79, filesize=11.7 K 2024-11-27T16:22:42,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/baf4d3d663f14f2bba64083142582e86 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/baf4d3d663f14f2bba64083142582e86 2024-11-27T16:22:42,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/baf4d3d663f14f2bba64083142582e86, entries=150, sequenceid=79, filesize=11.7 K 2024-11-27T16:22:42,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for a4d3cff76c8a3133e00d8e6d8859dc7d in 417ms, sequenceid=79, compaction requested=false 2024-11-27T16:22:42,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:42,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:42,778 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:22:42,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:42,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:42,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:42,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:42,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:42,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:42,817 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127114af0ec9c10455387e601166c4de7d1_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724562394/Put/seqid=0 2024-11-27T16:22:42,840 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-27T16:22:42,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:42,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:42,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:42,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:42,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:42,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:42,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742222_1398 (size=14594) 2024-11-27T16:22:42,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724622912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724622913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724622914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:42,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724622915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:42,940 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/7edda95080ab4e218a71dbabbcd39787 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7edda95080ab4e218a71dbabbcd39787 2024-11-27T16:22:42,945 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/C of a4d3cff76c8a3133e00d8e6d8859dc7d into 7edda95080ab4e218a71dbabbcd39787(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:22:42,945 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:42,945 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/C, priority=13, startTime=1732724562278; duration=0sec 2024-11-27T16:22:42,945 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:42,945 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:C 2024-11-27T16:22:42,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-27T16:22:43,003 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-27T16:22:43,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:43,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:43,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:43,004 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:43,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:43,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:43,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724623027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724623040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724623041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724623042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,156 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,157 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-27T16:22:43,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:43,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:43,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:43,157 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:43,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:43,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:43,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724623247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724623249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724623255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,280 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:43,289 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127114af0ec9c10455387e601166c4de7d1_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127114af0ec9c10455387e601166c4de7d1_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:43,291 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/b01c5c52b34f431298ec34d09c4e13df, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:43,291 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/b01c5c52b34f431298ec34d09c4e13df is 175, key is test_row_0/A:col10/1732724562394/Put/seqid=0 2024-11-27T16:22:43,309 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-27T16:22:43,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:43,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
as already flushing 2024-11-27T16:22:43,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:43,310 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:43,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:43,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:43,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742223_1399 (size=39549) 2024-11-27T16:22:43,335 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/b01c5c52b34f431298ec34d09c4e13df 2024-11-27T16:22:43,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/18beb8a570f64a908a850c8879e83035 is 50, key is test_row_0/B:col10/1732724562394/Put/seqid=0 2024-11-27T16:22:43,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742224_1400 (size=12001) 2024-11-27T16:22:43,377 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/18beb8a570f64a908a850c8879e83035 2024-11-27T16:22:43,386 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/511d8adf98be44ceafaed61a8f9ba971 is 50, key is test_row_0/C:col10/1732724562394/Put/seqid=0 2024-11-27T16:22:43,411 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742225_1401 (size=12001) 2024-11-27T16:22:43,412 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/511d8adf98be44ceafaed61a8f9ba971 2024-11-27T16:22:43,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/b01c5c52b34f431298ec34d09c4e13df as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b01c5c52b34f431298ec34d09c4e13df 2024-11-27T16:22:43,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b01c5c52b34f431298ec34d09c4e13df, entries=200, sequenceid=93, filesize=38.6 K 2024-11-27T16:22:43,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/18beb8a570f64a908a850c8879e83035 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/18beb8a570f64a908a850c8879e83035 2024-11-27T16:22:43,429 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/18beb8a570f64a908a850c8879e83035, entries=150, sequenceid=93, filesize=11.7 K 2024-11-27T16:22:43,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/511d8adf98be44ceafaed61a8f9ba971 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/511d8adf98be44ceafaed61a8f9ba971 2024-11-27T16:22:43,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/511d8adf98be44ceafaed61a8f9ba971, entries=150, sequenceid=93, filesize=11.7 K 2024-11-27T16:22:43,453 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for a4d3cff76c8a3133e00d8e6d8859dc7d in 676ms, sequenceid=93, compaction requested=true 2024-11-27T16:22:43,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:43,454 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:43,454 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:43,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:43,454 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:43,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:43,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:43,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:43,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:43,455 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110156 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:43,455 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/A is initiating minor compaction (all files) 2024-11-27T16:22:43,455 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/A in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:43,455 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/56af902700004c05bcb5516401f36fb1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1a8deb7377194e28a2fb426e3549606a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b01c5c52b34f431298ec34d09c4e13df] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=107.6 K 2024-11-27T16:22:43,456 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
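The RegionTooBusyException entries above are HRegion.checkResources rejecting puts while the region's memstore sits over its 512.0 K blocking limit, and they keep appearing until the in-flight flush drains the memstore. The stock HBase client already retries this exception internally, so the following is only a minimal standalone sketch of what an explicit application-level retry with backoff could look like; the class name, backoff values and cell value are invented for illustration, while the table name, family A, row test_row_0 and qualifier col10 are taken from the log entries.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyRetryExample {

  // Retry a put a few times, doubling the pause after every
  // RegionTooBusyException so the region server has time to flush.
  static void putWithBackoff(Table table, Put put, int maxAttempts, long initialPauseMs)
      throws IOException, InterruptedException {
    long pause = initialPauseMs;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;               // out of attempts, surface the exception
        }
        Thread.sleep(pause);     // back off before the next attempt
        pause *= 2;
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));
      putWithBackoff(table, put, 5, 100L);
    }
  }
}

The 512.0 K figure in these messages is the per-region blocking threshold (roughly the configured memstore flush size times the block multiplier), which this test setup appears to keep deliberately small so that the flush and back-pressure paths are exercised.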
2024-11-27T16:22:43,456 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/56af902700004c05bcb5516401f36fb1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1a8deb7377194e28a2fb426e3549606a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b01c5c52b34f431298ec34d09c4e13df] 2024-11-27T16:22:43,456 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:43,456 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/B is initiating minor compaction (all files) 2024-11-27T16:22:43,456 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/B in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:43,456 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/193da816a3d44c58bd88e88cfa947f9f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/f4ce771570cc4e288aa047d8eefa57d7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/18beb8a570f64a908a850c8879e83035] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=35.3 K 2024-11-27T16:22:43,457 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56af902700004c05bcb5516401f36fb1, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732724560432 2024-11-27T16:22:43,457 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 193da816a3d44c58bd88e88cfa947f9f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732724560432 2024-11-27T16:22:43,457 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a8deb7377194e28a2fb426e3549606a, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732724561609 2024-11-27T16:22:43,457 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f4ce771570cc4e288aa047d8eefa57d7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732724561692 2024-11-27T16:22:43,458 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b01c5c52b34f431298ec34d09c4e13df, keycount=200, 
bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732724562359 2024-11-27T16:22:43,458 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 18beb8a570f64a908a850c8879e83035, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732724562359 2024-11-27T16:22:43,462 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-27T16:22:43,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:43,463 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T16:22:43,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:43,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:43,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:43,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:43,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:43,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:43,478 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#B#compaction#342 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:43,479 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/45a4a193933349e490daeb751c058285 is 50, key is test_row_0/B:col10/1732724562394/Put/seqid=0 2024-11-27T16:22:43,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-27T16:22:43,487 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:43,510 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127140e34f0051d4c1682b36463e2fb0810_a4d3cff76c8a3133e00d8e6d8859dc7d store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:43,512 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127140e34f0051d4c1682b36463e2fb0810_a4d3cff76c8a3133e00d8e6d8859dc7d, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:43,513 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127140e34f0051d4c1682b36463e2fb0810_a4d3cff76c8a3133e00d8e6d8859dc7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:43,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270371e0fc015a423e8aeb15327803cef2_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724562907/Put/seqid=0 2024-11-27T16:22:43,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742226_1402 (size=12207) 2024-11-27T16:22:43,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:43,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
as already flushing 2024-11-27T16:22:43,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742227_1403 (size=4469) 2024-11-27T16:22:43,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742228_1404 (size=12154) 2024-11-27T16:22:43,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:43,608 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270371e0fc015a423e8aeb15327803cef2_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270371e0fc015a423e8aeb15327803cef2_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:43,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/ebb5179562b84a52aa6739404cba4e5c, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:43,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/ebb5179562b84a52aa6739404cba4e5c is 175, key is test_row_0/A:col10/1732724562907/Put/seqid=0 2024-11-27T16:22:43,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724623601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724623604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724623604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724623610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742229_1405 (size=30955) 2024-11-27T16:22:43,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724623714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724623726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724623726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724623727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724623929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724623940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724623940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724623944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,957 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/45a4a193933349e490daeb751c058285 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/45a4a193933349e490daeb751c058285 2024-11-27T16:22:43,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:43,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724623945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:43,961 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/B of a4d3cff76c8a3133e00d8e6d8859dc7d into 45a4a193933349e490daeb751c058285(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:43,961 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:43,961 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/B, priority=13, startTime=1732724563454; duration=0sec 2024-11-27T16:22:43,961 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:43,961 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:B 2024-11-27T16:22:43,962 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:43,962 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:43,963 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/C is initiating minor compaction (all files) 2024-11-27T16:22:43,963 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/C in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
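The flush above finishes with "compaction requested=true", after which the CompactSplit threads select and run minor compactions for stores A, B and C of the same region. As a rough illustration of driving that flush-then-compact sequence from a client, the sketch below goes through the public Admin API; the connection boilerplate, class name and polling interval are assumptions, and only the table name and column family C come from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);                        // write the memstores out to HFiles
      admin.compact(table, Bytes.toBytes("C"));  // queue a minor compaction of family C
      // compact() only queues the request; poll until the servers report it finished.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);
      }
    }
  }
}

Admin.compact only queues the request; the actual file selection still happens on the region server through ExploringCompactionPolicy, which is what the "Exploring compaction algorithm has selected 3 files" entries above are reporting.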
2024-11-27T16:22:43,963 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7edda95080ab4e218a71dbabbcd39787, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/baf4d3d663f14f2bba64083142582e86, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/511d8adf98be44ceafaed61a8f9ba971] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=35.3 K 2024-11-27T16:22:43,963 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7edda95080ab4e218a71dbabbcd39787, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732724560432 2024-11-27T16:22:43,963 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting baf4d3d663f14f2bba64083142582e86, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732724561692 2024-11-27T16:22:43,964 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 511d8adf98be44ceafaed61a8f9ba971, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732724562359 2024-11-27T16:22:43,984 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#C#compaction#345 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:43,984 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/7d894d5d75e642bd80d672d695c631c9 is 50, key is test_row_0/C:col10/1732724562394/Put/seqid=0 2024-11-27T16:22:43,990 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#A#compaction#343 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:43,991 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/104288c612004449bc800085c9f052fe is 175, key is test_row_0/A:col10/1732724562394/Put/seqid=0 2024-11-27T16:22:44,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742230_1406 (size=12207) 2024-11-27T16:22:44,023 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/7d894d5d75e642bd80d672d695c631c9 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7d894d5d75e642bd80d672d695c631c9 2024-11-27T16:22:44,032 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/C of a4d3cff76c8a3133e00d8e6d8859dc7d into 7d894d5d75e642bd80d672d695c631c9(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:44,032 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:44,032 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/C, priority=13, startTime=1732724563454; duration=0sec 2024-11-27T16:22:44,032 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:44,032 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:C 2024-11-27T16:22:44,058 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/ebb5179562b84a52aa6739404cba4e5c 2024-11-27T16:22:44,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742231_1407 (size=31161) 2024-11-27T16:22:44,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/b05fbb5180354e329fc44ab8076381a4 is 50, key is test_row_0/B:col10/1732724562907/Put/seqid=0 2024-11-27T16:22:44,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to 
blk_1073742232_1408 (size=12001) 2024-11-27T16:22:44,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:44,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724624246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:44,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:44,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724624251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:44,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:44,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724624259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:44,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:44,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724624260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:44,467 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/104288c612004449bc800085c9f052fe as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/104288c612004449bc800085c9f052fe 2024-11-27T16:22:44,472 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/A of a4d3cff76c8a3133e00d8e6d8859dc7d into 104288c612004449bc800085c9f052fe(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 1sec to execute. 
2024-11-27T16:22:44,472 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:44,472 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/A, priority=13, startTime=1732724563454; duration=1sec 2024-11-27T16:22:44,473 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:44,473 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:A 2024-11-27T16:22:44,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-27T16:22:44,523 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/b05fbb5180354e329fc44ab8076381a4 2024-11-27T16:22:44,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/1fcd8c4dbe594a8395e14cc621fe0e65 is 50, key is test_row_0/C:col10/1732724562907/Put/seqid=0 2024-11-27T16:22:44,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742233_1409 (size=12001) 2024-11-27T16:22:44,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:44,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724624758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:44,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:44,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724624760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:44,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:44,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724624778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:44,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:44,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724624780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:44,980 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/1fcd8c4dbe594a8395e14cc621fe0e65 2024-11-27T16:22:44,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/ebb5179562b84a52aa6739404cba4e5c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/ebb5179562b84a52aa6739404cba4e5c 2024-11-27T16:22:44,996 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/ebb5179562b84a52aa6739404cba4e5c, entries=150, sequenceid=118, filesize=30.2 K 2024-11-27T16:22:44,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/b05fbb5180354e329fc44ab8076381a4 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b05fbb5180354e329fc44ab8076381a4 2024-11-27T16:22:45,006 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b05fbb5180354e329fc44ab8076381a4, entries=150, sequenceid=118, filesize=11.7 K 2024-11-27T16:22:45,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/1fcd8c4dbe594a8395e14cc621fe0e65 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/1fcd8c4dbe594a8395e14cc621fe0e65 2024-11-27T16:22:45,011 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/1fcd8c4dbe594a8395e14cc621fe0e65, entries=150, sequenceid=118, filesize=11.7 K 2024-11-27T16:22:45,012 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for a4d3cff76c8a3133e00d8e6d8859dc7d in 1549ms, sequenceid=118, compaction requested=false 2024-11-27T16:22:45,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:45,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:45,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-27T16:22:45,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-11-27T16:22:45,015 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-27T16:22:45,015 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6410 sec 2024-11-27T16:22:45,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 2.6470 sec 2024-11-27T16:22:45,775 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T16:22:45,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:45,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:45,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:45,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:45,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:45,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-27T16:22:45,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:45,792 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112769a2dbe4fb4043888551388153589026_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724563602/Put/seqid=0 2024-11-27T16:22:45,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742234_1410 (size=14694) 2024-11-27T16:22:45,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:45,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724625866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:45,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:45,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724625866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:45,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:45,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724625883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:45,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:45,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724625883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:45,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:45,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724625955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:45,963 DEBUG [Thread-1697 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4263 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:22:45,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:45,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724625989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:46,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724625989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:46,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724626013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:46,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724626013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:46,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724626202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:46,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724626202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:46,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724626217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:46,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724626220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,235 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:46,240 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112769a2dbe4fb4043888551388153589026_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112769a2dbe4fb4043888551388153589026_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:46,242 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/50b5000659b64a85bfc1a87ba5aa9fa0, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:46,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/50b5000659b64a85bfc1a87ba5aa9fa0 is 175, key is test_row_0/A:col10/1732724563602/Put/seqid=0 2024-11-27T16:22:46,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742235_1411 (size=39649) 2024-11-27T16:22:46,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=102 2024-11-27T16:22:46,486 INFO [Thread-1701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-11-27T16:22:46,489 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:46,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-27T16:22:46,491 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:46,492 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:46,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:46,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-27T16:22:46,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:46,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724626519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724626519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724626541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:46,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724626541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-27T16:22:46,646 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-27T16:22:46,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:46,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:46,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:46,647 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
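The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting writes while the region's memstore sits above its blocking limit (512.0 K in this run). As a rough illustration only, not part of the test itself, a client could absorb such rejections with a bounded retry-and-backoff loop. The table, row, family and qualifier names below are taken from the log; the class name, value and retry parameters are hypothetical.

```java
// Hypothetical client-side sketch: retry a single put with backoff when the region
// rejects writes because its memstore is over the blocking limit (see log above).
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier taken from the log; the value is arbitrary.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));

      long backoffMs = 100L;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);  // rejected while HRegion.checkResources sees the memstore over its limit
          break;
        } catch (IOException e) {
          // Typically a RegionTooBusyException (possibly wrapped after client-side retries).
          if (attempt == 5) {
            throw e;       // give up after a bounded number of attempts
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;  // back off while MemStoreFlusher drains the region
        }
      }
    }
  }
}
```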
2024-11-27T16:22:46,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:46,684 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/50b5000659b64a85bfc1a87ba5aa9fa0 2024-11-27T16:22:46,705 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/43b2c9ece0d74331919bb0d42d21f530 is 50, key is test_row_0/B:col10/1732724563602/Put/seqid=0 2024-11-27T16:22:46,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742236_1412 (size=12101) 2024-11-27T16:22:46,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/43b2c9ece0d74331919bb0d42d21f530 2024-11-27T16:22:46,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/04b08e33ca1e4f9385a751bb68810b35 is 50, key is test_row_0/C:col10/1732724563602/Put/seqid=0 2024-11-27T16:22:46,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-27T16:22:46,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742237_1413 (size=12101) 2024-11-27T16:22:46,799 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-27T16:22:46,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:46,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:46,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
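The "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed" and FlushTableProcedure entries (pid=104/105) above correspond to explicit table flushes requested by the test client. A minimal sketch of that request through the public Admin API, assuming a standard client configuration; the class name is hypothetical.

```java
// Hypothetical sketch of the client call behind the FLUSH operations in the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestTableFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Drives a FlushTableProcedure on the master, which dispatches FlushRegionProcedure
      // calls to the region servers (pid=104/105 in the log above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

When the target region is already mid-flush, the dispatched FlushRegionCallable reports "NOT flushing ... as already flushing" and fails with "Unable to complete flush", which is why pid=105 appears re-dispatched several times in the log.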
2024-11-27T16:22:46,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:46,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:46,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:46,803 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/04b08e33ca1e4f9385a751bb68810b35 2024-11-27T16:22:46,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/50b5000659b64a85bfc1a87ba5aa9fa0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/50b5000659b64a85bfc1a87ba5aa9fa0 2024-11-27T16:22:46,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/50b5000659b64a85bfc1a87ba5aa9fa0, entries=200, sequenceid=135, filesize=38.7 K 2024-11-27T16:22:46,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/43b2c9ece0d74331919bb0d42d21f530 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/43b2c9ece0d74331919bb0d42d21f530 2024-11-27T16:22:46,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/43b2c9ece0d74331919bb0d42d21f530, entries=150, 
sequenceid=135, filesize=11.8 K 2024-11-27T16:22:46,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/04b08e33ca1e4f9385a751bb68810b35 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/04b08e33ca1e4f9385a751bb68810b35 2024-11-27T16:22:46,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/04b08e33ca1e4f9385a751bb68810b35, entries=150, sequenceid=135, filesize=11.8 K 2024-11-27T16:22:46,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for a4d3cff76c8a3133e00d8e6d8859dc7d in 1050ms, sequenceid=135, compaction requested=true 2024-11-27T16:22:46,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:46,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:46,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:46,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:46,826 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:46,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:46,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:46,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:22:46,826 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:46,827 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101765 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:46,827 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/A is initiating minor compaction (all files) 2024-11-27T16:22:46,827 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/A in 
TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:46,827 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/104288c612004449bc800085c9f052fe, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/ebb5179562b84a52aa6739404cba4e5c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/50b5000659b64a85bfc1a87ba5aa9fa0] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=99.4 K 2024-11-27T16:22:46,827 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:46,827 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/104288c612004449bc800085c9f052fe, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/ebb5179562b84a52aa6739404cba4e5c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/50b5000659b64a85bfc1a87ba5aa9fa0] 2024-11-27T16:22:46,828 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:46,828 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/B is initiating minor compaction (all files) 2024-11-27T16:22:46,828 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/B in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
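The 512.0 K blocking limit and the "3 eligible, 16 blocking" compaction selections above are governed by a handful of standard region-server settings. The sketch below names the usual knobs; the values shown are illustrative, not the ones this test run actually used (the 512 K limit suggests a deliberately tiny flush size in the test configuration).

```java
// Illustrative only: standard settings behind the behaviour logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionPressureSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Writes are rejected with RegionTooBusyException once a region's memstore exceeds
    // roughly flush.size * block.multiplier (the "Over memstore limit=..." value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    // Minor compaction is considered once a store has at least this many eligible files
    // ("Selecting compaction from 3 store files ..."); the "16 blocking" figure is the
    // per-store blocking file count at which further flushes are delayed.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
  }
}
```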
2024-11-27T16:22:46,828 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/45a4a193933349e490daeb751c058285, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b05fbb5180354e329fc44ab8076381a4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/43b2c9ece0d74331919bb0d42d21f530] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=35.5 K 2024-11-27T16:22:46,828 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 104288c612004449bc800085c9f052fe, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732724562359 2024-11-27T16:22:46,828 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 45a4a193933349e490daeb751c058285, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732724562359 2024-11-27T16:22:46,828 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ebb5179562b84a52aa6739404cba4e5c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732724562888 2024-11-27T16:22:46,829 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b05fbb5180354e329fc44ab8076381a4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732724562888 2024-11-27T16:22:46,829 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50b5000659b64a85bfc1a87ba5aa9fa0, keycount=200, bloomtype=ROW, size=38.7 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732724563574 2024-11-27T16:22:46,829 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 43b2c9ece0d74331919bb0d42d21f530, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732724563574 2024-11-27T16:22:46,853 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#B#compaction#351 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:46,854 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/0866c0cff70b4399be0b52cc22ce49ab is 50, key is test_row_0/B:col10/1732724563602/Put/seqid=0 2024-11-27T16:22:46,869 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:46,892 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127ad452d464b124f50994576e69de1c36c_a4d3cff76c8a3133e00d8e6d8859dc7d store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:46,894 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127ad452d464b124f50994576e69de1c36c_a4d3cff76c8a3133e00d8e6d8859dc7d, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:46,894 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127ad452d464b124f50994576e69de1c36c_a4d3cff76c8a3133e00d8e6d8859dc7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:46,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742238_1414 (size=12409) 2024-11-27T16:22:46,925 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/0866c0cff70b4399be0b52cc22ce49ab as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/0866c0cff70b4399be0b52cc22ce49ab 2024-11-27T16:22:46,929 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/B of a4d3cff76c8a3133e00d8e6d8859dc7d into 0866c0cff70b4399be0b52cc22ce49ab(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:22:46,930 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:46,930 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/B, priority=13, startTime=1732724566826; duration=0sec 2024-11-27T16:22:46,930 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:46,930 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:B 2024-11-27T16:22:46,930 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:46,931 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:46,931 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/C is initiating minor compaction (all files) 2024-11-27T16:22:46,931 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/C in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:46,931 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7d894d5d75e642bd80d672d695c631c9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/1fcd8c4dbe594a8395e14cc621fe0e65, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/04b08e33ca1e4f9385a751bb68810b35] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=35.5 K 2024-11-27T16:22:46,932 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d894d5d75e642bd80d672d695c631c9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732724562359 2024-11-27T16:22:46,932 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fcd8c4dbe594a8395e14cc621fe0e65, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732724562888 2024-11-27T16:22:46,933 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 04b08e33ca1e4f9385a751bb68810b35, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732724563574 2024-11-27T16:22:46,953 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
7b191dec6496,44169,1732724452967 2024-11-27T16:22:46,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742239_1415 (size=4469) 2024-11-27T16:22:46,956 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#A#compaction#352 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:46,956 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/b3c9ee8c7d3a4c7281c43b290b778477 is 175, key is test_row_0/A:col10/1732724563602/Put/seqid=0 2024-11-27T16:22:46,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-27T16:22:46,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:46,958 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T16:22:46,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:46,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:46,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:46,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:46,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:46,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:46,962 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#C#compaction#353 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:46,963 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/7fb29b49f56845f5a88739f7cf9b626a is 50, key is test_row_0/C:col10/1732724563602/Put/seqid=0 2024-11-27T16:22:47,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742241_1417 (size=12409) 2024-11-27T16:22:47,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411278a60e1ee2a474c60a61457d4d12bc989_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724565874/Put/seqid=0 2024-11-27T16:22:47,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742240_1416 (size=31363) 2024-11-27T16:22:47,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:47,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:47,063 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/b3c9ee8c7d3a4c7281c43b290b778477 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b3c9ee8c7d3a4c7281c43b290b778477 2024-11-27T16:22:47,069 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/A of a4d3cff76c8a3133e00d8e6d8859dc7d into b3c9ee8c7d3a4c7281c43b290b778477(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
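The "Completed compaction" entries above show the flush-triggered minor compactions of stores B and A finishing and clearing their under-compaction marks. Purely as an illustration, compactions can also be requested and polled from the client side; the class name and polling interval below are hypothetical, the rest uses the public Admin API.

```java
// Hypothetical sketch: requesting and observing compactions from the client,
// mirroring the "Starting compaction"/"Completed compaction" entries above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ObserveCompactions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");

      // Ask the region servers to compact every region of the table; the request is
      // queued by CompactSplit, the same mechanism behind the MemStoreFlusher-triggered
      // compaction requests in the log.
      admin.compact(table);

      // Poll until the servers report no compaction in progress for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500L);
      }
    }
  }
}
```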
2024-11-27T16:22:47,069 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:47,070 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/A, priority=13, startTime=1732724566826; duration=0sec 2024-11-27T16:22:47,070 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:47,070 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:A 2024-11-27T16:22:47,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742242_1418 (size=12304) 2024-11-27T16:22:47,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-27T16:22:47,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724627081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724627084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724627101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724627101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724627208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724627208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724627215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724627216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724627418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724627419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724627420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724627417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,447 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/7fb29b49f56845f5a88739f7cf9b626a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7fb29b49f56845f5a88739f7cf9b626a 2024-11-27T16:22:47,452 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/C of a4d3cff76c8a3133e00d8e6d8859dc7d into 7fb29b49f56845f5a88739f7cf9b626a(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
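The repeated WARN/DEBUG pairs above are the server rejecting Mutate calls with RegionTooBusyException while region a4d3cff76c8a3133e00d8e6d8859dc7d sits over its 512.0 K memstore blocking limit and the flush is still running; each CallRunner entry records the callId and deadline of the rejected put. For reference, a minimal Java sketch of a write path that retries on this exception; it assumes client-side retries have been dialed down so the raw exception reaches the caller (the stock client otherwise retries internally up to the operation timeout), and the retry cap and backoff are illustrative values, not settings from this test.

    // Minimal sketch: retry a put that may be rejected while the region's memstore
    // is over its blocking limit. Family "A" and qualifier "col10" mirror the keys
    // in the log; the retry cap (5) and linear backoff are assumptions.
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class PutWithRetry {
      static void putWithRetry(Connection conn, byte[] row, byte[] value) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(row)
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
          int attempts = 0;
          while (true) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException e) {  // region over memstore blocking limit
              if (++attempts >= 5) throw e;       // assumed cap on manual retries
              Thread.sleep(200L * attempts);      // assumed linear backoff
            }
          }
        }
      }
    }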
2024-11-27T16:22:47,452 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:47,452 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/C, priority=13, startTime=1732724566826; duration=0sec 2024-11-27T16:22:47,452 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:47,452 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:C 2024-11-27T16:22:47,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:47,480 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411278a60e1ee2a474c60a61457d4d12bc989_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278a60e1ee2a474c60a61457d4d12bc989_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:47,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/1e0b6ee33ee346218501fe98e762c26d, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:47,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/1e0b6ee33ee346218501fe98e762c26d is 175, key is test_row_0/A:col10/1732724565874/Put/seqid=0 2024-11-27T16:22:47,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742243_1419 (size=31105) 2024-11-27T16:22:47,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-27T16:22:47,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724627730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724627744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724627745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:47,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724627745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:47,924 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=159, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/1e0b6ee33ee346218501fe98e762c26d 2024-11-27T16:22:47,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/085dc0d27a304af0a020c7d435233934 is 50, key is test_row_0/B:col10/1732724565874/Put/seqid=0 2024-11-27T16:22:47,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742244_1420 (size=12151) 2024-11-27T16:22:47,984 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/085dc0d27a304af0a020c7d435233934 2024-11-27T16:22:48,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/29b8b7b8920647e6893a560e7654b195 is 50, key is test_row_0/C:col10/1732724565874/Put/seqid=0 2024-11-27T16:22:48,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742245_1421 (size=12151) 2024-11-27T16:22:48,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:48,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724628251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:48,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:48,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724628258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:48,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:48,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724628258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:48,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:48,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724628259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:48,438 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/29b8b7b8920647e6893a560e7654b195 2024-11-27T16:22:48,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/1e0b6ee33ee346218501fe98e762c26d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1e0b6ee33ee346218501fe98e762c26d 2024-11-27T16:22:48,450 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1e0b6ee33ee346218501fe98e762c26d, entries=150, sequenceid=159, filesize=30.4 K 2024-11-27T16:22:48,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/085dc0d27a304af0a020c7d435233934 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/085dc0d27a304af0a020c7d435233934 2024-11-27T16:22:48,459 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/085dc0d27a304af0a020c7d435233934, entries=150, sequenceid=159, filesize=11.9 K 2024-11-27T16:22:48,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/29b8b7b8920647e6893a560e7654b195 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/29b8b7b8920647e6893a560e7654b195 2024-11-27T16:22:48,467 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/29b8b7b8920647e6893a560e7654b195, entries=150, sequenceid=159, filesize=11.9 K 2024-11-27T16:22:48,476 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for a4d3cff76c8a3133e00d8e6d8859dc7d in 1518ms, sequenceid=159, compaction requested=false 2024-11-27T16:22:48,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:48,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:48,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-27T16:22:48,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-11-27T16:22:48,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-27T16:22:48,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9850 sec 2024-11-27T16:22:48,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 1.9930 sec 2024-11-27T16:22:48,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-27T16:22:48,598 INFO [Thread-1701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-27T16:22:48,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:48,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-27T16:22:48,603 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
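The procedure entries above show one flush round-trip completing (FlushRegionProcedure pid=105 under FlushTableProcedure pid=104) and the next one being stored (pid=106) in response to a fresh client request, with the client polling "Checking to see if procedure is done" until the master reports completion. For reference, a minimal Java sketch of the kind of client call that drives this; the connection setup is an assumption, while the table name is the one in the log.

    // Minimal sketch: ask the master to flush TestAcidGuarantees. In this build,
    // Admin.flush waits for the resulting FlushTableProcedure to finish, which is
    // what the repeated "Checking to see if procedure is done" DEBUG lines show.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees"));  // returns once the flush procedure completes
        }
      }
    }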
2024-11-27T16:22:48,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-27T16:22:48,603 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:48,603 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:48,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-27T16:22:48,755 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:48,755 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-27T16:22:48,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:48,756 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-27T16:22:48,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:48,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:48,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:48,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:48,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:48,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:48,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411276c8e5454fc0842f491cd95453cba8148_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724567086/Put/seqid=0 2024-11-27T16:22:48,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46329 is added to blk_1073742246_1422 (size=12304) 2024-11-27T16:22:48,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-27T16:22:49,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-27T16:22:49,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:49,250 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411276c8e5454fc0842f491cd95453cba8148_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411276c8e5454fc0842f491cd95453cba8148_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:49,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/26dceeee41364cb3bfcd523ad31f89b7, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:49,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/26dceeee41364cb3bfcd523ad31f89b7 is 175, key is test_row_0/A:col10/1732724567086/Put/seqid=0 2024-11-27T16:22:49,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
as already flushing 2024-11-27T16:22:49,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:49,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742247_1423 (size=31105) 2024-11-27T16:22:49,282 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/26dceeee41364cb3bfcd523ad31f89b7 2024-11-27T16:22:49,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/88e382f00363461da9f322631a2f6e54 is 50, key is test_row_0/B:col10/1732724567086/Put/seqid=0 2024-11-27T16:22:49,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742248_1424 (size=12151) 2024-11-27T16:22:49,330 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/88e382f00363461da9f322631a2f6e54 2024-11-27T16:22:49,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724629322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724629323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724629325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724629334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/4736a38e288140ef964bd1ea89e87a9c is 50, key is test_row_0/C:col10/1732724567086/Put/seqid=0 2024-11-27T16:22:49,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742249_1425 (size=12151) 2024-11-27T16:22:49,382 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/4736a38e288140ef964bd1ea89e87a9c 2024-11-27T16:22:49,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/26dceeee41364cb3bfcd523ad31f89b7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/26dceeee41364cb3bfcd523ad31f89b7 2024-11-27T16:22:49,393 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/26dceeee41364cb3bfcd523ad31f89b7, entries=150, sequenceid=174, filesize=30.4 K 2024-11-27T16:22:49,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/88e382f00363461da9f322631a2f6e54 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/88e382f00363461da9f322631a2f6e54 2024-11-27T16:22:49,398 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 
{event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/88e382f00363461da9f322631a2f6e54, entries=150, sequenceid=174, filesize=11.9 K 2024-11-27T16:22:49,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/4736a38e288140ef964bd1ea89e87a9c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4736a38e288140ef964bd1ea89e87a9c 2024-11-27T16:22:49,403 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4736a38e288140ef964bd1ea89e87a9c, entries=150, sequenceid=174, filesize=11.9 K 2024-11-27T16:22:49,404 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a4d3cff76c8a3133e00d8e6d8859dc7d in 649ms, sequenceid=174, compaction requested=true 2024-11-27T16:22:49,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:49,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:49,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-27T16:22:49,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-27T16:22:49,407 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-27T16:22:49,407 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 803 msec 2024-11-27T16:22:49,409 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 807 msec 2024-11-27T16:22:49,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:49,441 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T16:22:49,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:49,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:49,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:49,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:49,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:49,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:49,455 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127720c9902615a46519c5f15ba96c91b69_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724569332/Put/seqid=0 2024-11-27T16:22:49,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742250_1426 (size=14794) 2024-11-27T16:22:49,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724629471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724629470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724629482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724629488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724629594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724629595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724629595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724629600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-27T16:22:49,708 INFO [Thread-1701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-27T16:22:49,709 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:49,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-27T16:22:49,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-27T16:22:49,713 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:49,714 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:49,714 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:49,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-27T16:22:49,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724629808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724629810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724629810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:49,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724629811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,866 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:49,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-27T16:22:49,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:49,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:49,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:49,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:49,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:49,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:49,886 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:49,891 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127720c9902615a46519c5f15ba96c91b69_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127720c9902615a46519c5f15ba96c91b69_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:49,905 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/581b591148c346c4b78806b85ba257b2, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:49,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/581b591148c346c4b78806b85ba257b2 is 175, key is test_row_0/A:col10/1732724569332/Put/seqid=0 2024-11-27T16:22:49,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742251_1427 (size=39749) 2024-11-27T16:22:49,940 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/581b591148c346c4b78806b85ba257b2 2024-11-27T16:22:49,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/7b0d0a00fa1b4e21aa405031c727c90e is 50, key is test_row_0/B:col10/1732724569332/Put/seqid=0 2024-11-27T16:22:50,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:50,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33122 deadline: 1732724629997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,011 DEBUG [Thread-1697 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8311 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:22:50,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 
2024-11-27T16:22:50,026 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-27T16:22:50,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:50,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:50,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:50,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:50,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742252_1428 (size=12151) 2024-11-27T16:22:50,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:50,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724630121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:50,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724630122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:50,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724630122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:50,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724630124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,181 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,181 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-27T16:22:50,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:50,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:50,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:50,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:50,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-27T16:22:50,334 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-27T16:22:50,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:50,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:50,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:50,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:50,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/7b0d0a00fa1b4e21aa405031c727c90e 2024-11-27T16:22:50,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/f444345f6985499b8f10191689a2186f is 50, key is test_row_0/C:col10/1732724569332/Put/seqid=0 2024-11-27T16:22:50,488 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-27T16:22:50,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742253_1429 (size=12151) 2024-11-27T16:22:50,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:50,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,496 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:50,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:50,496 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/f444345f6985499b8f10191689a2186f 2024-11-27T16:22:50,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:50,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/581b591148c346c4b78806b85ba257b2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/581b591148c346c4b78806b85ba257b2 2024-11-27T16:22:50,507 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/581b591148c346c4b78806b85ba257b2, entries=200, sequenceid=198, filesize=38.8 K 2024-11-27T16:22:50,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/7b0d0a00fa1b4e21aa405031c727c90e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/7b0d0a00fa1b4e21aa405031c727c90e 2024-11-27T16:22:50,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/7b0d0a00fa1b4e21aa405031c727c90e, entries=150, sequenceid=198, filesize=11.9 K 2024-11-27T16:22:50,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/f444345f6985499b8f10191689a2186f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/f444345f6985499b8f10191689a2186f 2024-11-27T16:22:50,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/f444345f6985499b8f10191689a2186f, entries=150, sequenceid=198, filesize=11.9 K 2024-11-27T16:22:50,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for a4d3cff76c8a3133e00d8e6d8859dc7d in 1079ms, sequenceid=198, compaction requested=true 2024-11-27T16:22:50,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:50,519 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:50,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:50,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:50,519 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:50,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:50,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:50,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:50,519 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:50,521 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133322 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:50,521 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:50,521 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/A is initiating minor compaction (all files) 2024-11-27T16:22:50,521 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/B is initiating minor compaction (all files) 2024-11-27T16:22:50,521 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/A in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,521 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/B in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:50,521 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b3c9ee8c7d3a4c7281c43b290b778477, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1e0b6ee33ee346218501fe98e762c26d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/26dceeee41364cb3bfcd523ad31f89b7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/581b591148c346c4b78806b85ba257b2] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=130.2 K 2024-11-27T16:22:50,521 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/0866c0cff70b4399be0b52cc22ce49ab, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/085dc0d27a304af0a020c7d435233934, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/88e382f00363461da9f322631a2f6e54, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/7b0d0a00fa1b4e21aa405031c727c90e] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=47.7 K 2024-11-27T16:22:50,521 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,521 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b3c9ee8c7d3a4c7281c43b290b778477, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1e0b6ee33ee346218501fe98e762c26d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/26dceeee41364cb3bfcd523ad31f89b7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/581b591148c346c4b78806b85ba257b2] 2024-11-27T16:22:50,522 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3c9ee8c7d3a4c7281c43b290b778477, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732724563574 2024-11-27T16:22:50,522 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e0b6ee33ee346218501fe98e762c26d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732724565856 2024-11-27T16:22:50,523 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0866c0cff70b4399be0b52cc22ce49ab, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732724563574 2024-11-27T16:22:50,523 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26dceeee41364cb3bfcd523ad31f89b7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732724567081 2024-11-27T16:22:50,523 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 581b591148c346c4b78806b85ba257b2, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732724569318 2024-11-27T16:22:50,523 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 085dc0d27a304af0a020c7d435233934, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732724565856 2024-11-27T16:22:50,524 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 88e382f00363461da9f322631a2f6e54, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732724567081 2024-11-27T16:22:50,524 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b0d0a00fa1b4e21aa405031c727c90e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732724569323 2024-11-27T16:22:50,556 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:50,574 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#B#compaction#364 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:50,575 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/b72723bc276c48649a86dff9a35aa36d is 50, key is test_row_0/B:col10/1732724569332/Put/seqid=0 2024-11-27T16:22:50,587 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127ce627200e4054db9aabd0063afb154ac_a4d3cff76c8a3133e00d8e6d8859dc7d store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:50,589 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127ce627200e4054db9aabd0063afb154ac_a4d3cff76c8a3133e00d8e6d8859dc7d, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:50,589 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127ce627200e4054db9aabd0063afb154ac_a4d3cff76c8a3133e00d8e6d8859dc7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:50,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742254_1430 (size=12595) 2024-11-27T16:22:50,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742255_1431 (size=4469) 2024-11-27T16:22:50,633 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#A#compaction#363 average throughput is 0.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:50,634 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/15fcda054bdb4ca6bd4a9868610805f6 is 175, key is test_row_0/A:col10/1732724569332/Put/seqid=0 2024-11-27T16:22:50,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:50,646 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T16:22:50,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:50,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:50,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:50,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:50,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:50,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:50,649 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,649 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-27T16:22:50,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:50,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,649 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:50,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:50,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:50,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411274ef612bfdbb34a619ad30db24391d1ec_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724570644/Put/seqid=0 2024-11-27T16:22:50,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742256_1432 (size=31549) 2024-11-27T16:22:50,673 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/15fcda054bdb4ca6bd4a9868610805f6 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/15fcda054bdb4ca6bd4a9868610805f6 2024-11-27T16:22:50,685 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/A of a4d3cff76c8a3133e00d8e6d8859dc7d into 15fcda054bdb4ca6bd4a9868610805f6(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:22:50,685 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:50,685 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/A, priority=12, startTime=1732724570519; duration=0sec 2024-11-27T16:22:50,685 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:50,685 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:A 2024-11-27T16:22:50,685 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:50,689 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:50,689 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/C is initiating minor compaction (all files) 2024-11-27T16:22:50,689 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/C in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,694 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7fb29b49f56845f5a88739f7cf9b626a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/29b8b7b8920647e6893a560e7654b195, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4736a38e288140ef964bd1ea89e87a9c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/f444345f6985499b8f10191689a2186f] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=47.7 K 2024-11-27T16:22:50,694 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fb29b49f56845f5a88739f7cf9b626a, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732724563574 2024-11-27T16:22:50,695 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29b8b7b8920647e6893a560e7654b195, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732724565856 2024-11-27T16:22:50,695 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4736a38e288140ef964bd1ea89e87a9c, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732724567081 2024-11-27T16:22:50,696 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f444345f6985499b8f10191689a2186f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732724569323 2024-11-27T16:22:50,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742257_1433 (size=17284) 2024-11-27T16:22:50,708 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:50,714 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411274ef612bfdbb34a619ad30db24391d1ec_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411274ef612bfdbb34a619ad30db24391d1ec_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:50,715 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/9c9f94791b7e4cc3afbe109e44a23853, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:50,715 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/9c9f94791b7e4cc3afbe109e44a23853 is 175, key is test_row_0/A:col10/1732724570644/Put/seqid=0 2024-11-27T16:22:50,719 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#C#compaction#366 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:50,720 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/0ab8ef2522fa44919214cb2cbb400b61 is 50, key is test_row_0/C:col10/1732724569332/Put/seqid=0 2024-11-27T16:22:50,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:50,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724630732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:50,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724630735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:50,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724630736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:50,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724630738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742258_1434 (size=48389) 2024-11-27T16:22:50,765 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/9c9f94791b7e4cc3afbe109e44a23853 2024-11-27T16:22:50,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742259_1435 (size=12595) 2024-11-27T16:22:50,803 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,803 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-27T16:22:50,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:50,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,805 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:50,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:50,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:50,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/e525543283ac4447b311710db8760848 is 50, key is test_row_0/B:col10/1732724570644/Put/seqid=0 2024-11-27T16:22:50,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-27T16:22:50,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742260_1436 (size=12151) 2024-11-27T16:22:50,837 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/e525543283ac4447b311710db8760848 2024-11-27T16:22:50,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:50,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724630841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:50,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724630844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:50,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724630845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:50,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724630846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,861 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/1584fee575254e56a6107a3adb2db508 is 50, key is test_row_0/C:col10/1732724570644/Put/seqid=0 2024-11-27T16:22:50,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742261_1437 (size=12151) 2024-11-27T16:22:50,902 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/1584fee575254e56a6107a3adb2db508 2024-11-27T16:22:50,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/9c9f94791b7e4cc3afbe109e44a23853 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/9c9f94791b7e4cc3afbe109e44a23853 2024-11-27T16:22:50,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/9c9f94791b7e4cc3afbe109e44a23853, entries=250, sequenceid=213, filesize=47.3 K 2024-11-27T16:22:50,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/e525543283ac4447b311710db8760848 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/e525543283ac4447b311710db8760848 2024-11-27T16:22:50,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/e525543283ac4447b311710db8760848, entries=150, sequenceid=213, filesize=11.9 K 2024-11-27T16:22:50,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/1584fee575254e56a6107a3adb2db508 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/1584fee575254e56a6107a3adb2db508 2024-11-27T16:22:50,928 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/1584fee575254e56a6107a3adb2db508, entries=150, sequenceid=213, filesize=11.9 K 2024-11-27T16:22:50,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for a4d3cff76c8a3133e00d8e6d8859dc7d in 282ms, sequenceid=213, compaction requested=false 2024-11-27T16:22:50,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:50,957 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:50,958 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-27T16:22:50,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:50,958 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-27T16:22:50,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:50,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:50,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:50,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:50,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:50,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:50,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127f305dadb7b1b4165a72b644a316c793d_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724570687/Put/seqid=0 2024-11-27T16:22:51,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742262_1438 (size=12304) 2024-11-27T16:22:51,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:51,028 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127f305dadb7b1b4165a72b644a316c793d_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127f305dadb7b1b4165a72b644a316c793d_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:51,029 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/b72723bc276c48649a86dff9a35aa36d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b72723bc276c48649a86dff9a35aa36d 2024-11-27T16:22:51,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/9c1f4deac323410bbc34f1a3d0740299, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:51,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/9c1f4deac323410bbc34f1a3d0740299 is 175, key is test_row_0/A:col10/1732724570687/Put/seqid=0 2024-11-27T16:22:51,037 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/B of a4d3cff76c8a3133e00d8e6d8859dc7d into b72723bc276c48649a86dff9a35aa36d(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:22:51,037 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:51,037 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/B, priority=12, startTime=1732724570519; duration=0sec 2024-11-27T16:22:51,037 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:51,037 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:B 2024-11-27T16:22:51,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742263_1439 (size=31105) 2024-11-27T16:22:51,056 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=235, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/9c1f4deac323410bbc34f1a3d0740299 2024-11-27T16:22:51,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:51,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:51,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/d3ed781addc644b3af6dcfbb991806b2 is 50, key is test_row_0/B:col10/1732724570687/Put/seqid=0 2024-11-27T16:22:51,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742264_1440 (size=12151) 2024-11-27T16:22:51,089 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/d3ed781addc644b3af6dcfbb991806b2 2024-11-27T16:22:51,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724631084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724631087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/5e43faab3a084427a7c41eb43f4872e5 is 50, key is test_row_0/C:col10/1732724570687/Put/seqid=0 2024-11-27T16:22:51,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724631101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724631110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742265_1441 (size=12151) 2024-11-27T16:22:51,132 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/5e43faab3a084427a7c41eb43f4872e5 2024-11-27T16:22:51,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/9c1f4deac323410bbc34f1a3d0740299 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/9c1f4deac323410bbc34f1a3d0740299 2024-11-27T16:22:51,144 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/9c1f4deac323410bbc34f1a3d0740299, entries=150, sequenceid=235, filesize=30.4 K 2024-11-27T16:22:51,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/d3ed781addc644b3af6dcfbb991806b2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d3ed781addc644b3af6dcfbb991806b2 2024-11-27T16:22:51,152 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d3ed781addc644b3af6dcfbb991806b2, entries=150, sequenceid=235, filesize=11.9 K 2024-11-27T16:22:51,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/5e43faab3a084427a7c41eb43f4872e5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/5e43faab3a084427a7c41eb43f4872e5 2024-11-27T16:22:51,157 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/5e43faab3a084427a7c41eb43f4872e5, entries=150, sequenceid=235, filesize=11.9 K 2024-11-27T16:22:51,159 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for a4d3cff76c8a3133e00d8e6d8859dc7d in 200ms, sequenceid=235, compaction requested=true 2024-11-27T16:22:51,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:51,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:51,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-27T16:22:51,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-27T16:22:51,162 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-27T16:22:51,162 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4460 sec 2024-11-27T16:22:51,163 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.4530 sec 2024-11-27T16:22:51,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-27T16:22:51,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:51,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:51,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:51,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:51,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:51,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:51,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:51,230 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/0ab8ef2522fa44919214cb2cbb400b61 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0ab8ef2522fa44919214cb2cbb400b61 2024-11-27T16:22:51,237 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/C of a4d3cff76c8a3133e00d8e6d8859dc7d into 0ab8ef2522fa44919214cb2cbb400b61(size=12.3 K), total size for store is 36.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:22:51,237 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:51,237 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/C, priority=12, startTime=1732724570519; duration=0sec 2024-11-27T16:22:51,237 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:51,237 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:C 2024-11-27T16:22:51,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c62b386e751d4cc6971acb69335509a5_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724571086/Put/seqid=0 2024-11-27T16:22:51,292 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-27T16:22:51,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724631279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724631294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724631296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724631297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742266_1442 (size=12304) 2024-11-27T16:22:51,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724631398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724631408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724631411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724631412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724631614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724631623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724631625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724631625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,713 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:51,717 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c62b386e751d4cc6971acb69335509a5_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c62b386e751d4cc6971acb69335509a5_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:51,718 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/a7764fd5d40343c6b6640a0a3d2b9444, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:51,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/a7764fd5d40343c6b6640a0a3d2b9444 is 175, key is test_row_0/A:col10/1732724571086/Put/seqid=0 2024-11-27T16:22:51,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742267_1443 (size=31105) 2024-11-27T16:22:51,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-27T16:22:51,818 INFO [Thread-1701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-27T16:22:51,819 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:51,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-27T16:22:51,821 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=110, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:51,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-27T16:22:51,823 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:51,823 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:51,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-27T16:22:51,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724631927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724631933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724631936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:51,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724631936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,974 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:51,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-27T16:22:51,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:51,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:51,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:51,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:51,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:51,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:52,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-27T16:22:52,128 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:52,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-27T16:22:52,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:52,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:52,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:52,128 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:52,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:52,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:52,162 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/a7764fd5d40343c6b6640a0a3d2b9444 2024-11-27T16:22:52,181 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/aa8b3d8d19564defa6267ae99a625d20 is 50, key is test_row_0/B:col10/1732724571086/Put/seqid=0 2024-11-27T16:22:52,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742268_1444 (size=12151) 2024-11-27T16:22:52,281 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:52,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-27T16:22:52,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:52,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
as already flushing 2024-11-27T16:22:52,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:52,282 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:52,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:52,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:52,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-27T16:22:52,433 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:52,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-27T16:22:52,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:52,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:52,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:52,434 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:52,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:52,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:52,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:52,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724632435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:52,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:52,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724632448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:52,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:52,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724632449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:52,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:52,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724632449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:52,586 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:52,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-27T16:22:52,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:52,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:52,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:52,587 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
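The entries above show client Mutate calls being rejected with RegionTooBusyException while the region sits over its 512.0 K memstore limit and the requested flush cannot run because one is already in progress. Below is a minimal client-side sketch, assuming the TestAcidGuarantees table and the row/qualifier names visible in this log, of backing off and retrying a Put when that exception surfaces; the HBase client's built-in retry policy normally handles this on its own, so the loop only makes the behaviour explicit.

// Minimal sketch (assumptions: table "TestAcidGuarantees", family A and qualifier col10 as
// seen in the log keys; real clients usually rely on the built-in retry policy instead).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return; // write accepted
        } catch (RegionTooBusyException e) {
          // Region is above its blocking memstore limit (the WARN entries above);
          // back off so the memstore flusher has time to drain the region, then retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
      throw new java.io.IOException("region still too busy after retries");
    }
  }
}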
2024-11-27T16:22:52,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:52,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:52,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/aa8b3d8d19564defa6267ae99a625d20 2024-11-27T16:22:52,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/d5183e963374479b9f43913b1fb1cb04 is 50, key is test_row_0/C:col10/1732724571086/Put/seqid=0 2024-11-27T16:22:52,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742269_1445 (size=12151) 2024-11-27T16:22:52,684 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/d5183e963374479b9f43913b1fb1cb04 2024-11-27T16:22:52,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/a7764fd5d40343c6b6640a0a3d2b9444 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/a7764fd5d40343c6b6640a0a3d2b9444 2024-11-27T16:22:52,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/a7764fd5d40343c6b6640a0a3d2b9444, entries=150, sequenceid=253, filesize=30.4 K 2024-11-27T16:22:52,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/aa8b3d8d19564defa6267ae99a625d20 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/aa8b3d8d19564defa6267ae99a625d20 2024-11-27T16:22:52,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/aa8b3d8d19564defa6267ae99a625d20, entries=150, sequenceid=253, filesize=11.9 K 2024-11-27T16:22:52,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/d5183e963374479b9f43913b1fb1cb04 as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/d5183e963374479b9f43913b1fb1cb04 2024-11-27T16:22:52,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/d5183e963374479b9f43913b1fb1cb04, entries=150, sequenceid=253, filesize=11.9 K 2024-11-27T16:22:52,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for a4d3cff76c8a3133e00d8e6d8859dc7d in 1496ms, sequenceid=253, compaction requested=true 2024-11-27T16:22:52,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:52,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:52,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:52,705 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:52,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:52,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:52,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:52,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:52,705 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:52,707 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:52,707 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/B is initiating minor compaction (all files) 2024-11-27T16:22:52,707 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/B in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
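The 512.0 K blocking limit and the repeated four-file compaction selections above come from region-server configuration: the blocking limit is the memstore flush size multiplied by the block multiplier. The sketch below uses illustrative values only (the actual test configuration is not shown in this log); 128 KB x 4 is simply one combination that yields the 512 KB figure seen here.

// Illustrative sketch only; the values are assumptions, not taken from the test source.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB ...
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // ... and reject writes (RegionTooBusyException) once it reaches 4x that, i.e. 512 KB.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Request a minor compaction once 4 store files accumulate, matching the
    // "Selecting compaction from 4 store files" entries above.
    conf.setInt("hbase.hstore.compaction.min", 4);
    return conf;
  }
}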
2024-11-27T16:22:52,707 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b72723bc276c48649a86dff9a35aa36d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/e525543283ac4447b311710db8760848, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d3ed781addc644b3af6dcfbb991806b2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/aa8b3d8d19564defa6267ae99a625d20] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=47.9 K 2024-11-27T16:22:52,707 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142148 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:52,707 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/A is initiating minor compaction (all files) 2024-11-27T16:22:52,707 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/A in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:52,708 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/15fcda054bdb4ca6bd4a9868610805f6, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/9c9f94791b7e4cc3afbe109e44a23853, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/9c1f4deac323410bbc34f1a3d0740299, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/a7764fd5d40343c6b6640a0a3d2b9444] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=138.8 K 2024-11-27T16:22:52,708 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:52,708 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/15fcda054bdb4ca6bd4a9868610805f6, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/9c9f94791b7e4cc3afbe109e44a23853, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/9c1f4deac323410bbc34f1a3d0740299, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/a7764fd5d40343c6b6640a0a3d2b9444] 2024-11-27T16:22:52,708 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b72723bc276c48649a86dff9a35aa36d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732724569323 2024-11-27T16:22:52,708 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15fcda054bdb4ca6bd4a9868610805f6, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732724569323 2024-11-27T16:22:52,708 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting e525543283ac4447b311710db8760848, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732724569468 2024-11-27T16:22:52,709 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c9f94791b7e4cc3afbe109e44a23853, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732724569453 2024-11-27T16:22:52,709 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d3ed781addc644b3af6dcfbb991806b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732724570687 2024-11-27T16:22:52,709 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c1f4deac323410bbc34f1a3d0740299, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732724570687 2024-11-27T16:22:52,709 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting aa8b3d8d19564defa6267ae99a625d20, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732724571056 2024-11-27T16:22:52,709 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7764fd5d40343c6b6640a0a3d2b9444, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732724571056 2024-11-27T16:22:52,731 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#B#compaction#375 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:52,732 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/cfdf879a24d44ce6a3082350b2823a8d is 50, key is test_row_0/B:col10/1732724571086/Put/seqid=0 2024-11-27T16:22:52,739 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:52,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-27T16:22:52,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:52,740 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-27T16:22:52,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:52,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:52,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:52,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:52,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:52,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:52,741 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:52,783 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127ea69c41dbbcd4791b08d2264121f5e45_a4d3cff76c8a3133e00d8e6d8859dc7d store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:52,786 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127ea69c41dbbcd4791b08d2264121f5e45_a4d3cff76c8a3133e00d8e6d8859dc7d, store=[table=TestAcidGuarantees 
family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:52,786 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127ea69c41dbbcd4791b08d2264121f5e45_a4d3cff76c8a3133e00d8e6d8859dc7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:52,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112702f5f881dba84dec8357c2390047b1a9_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724571269/Put/seqid=0 2024-11-27T16:22:52,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742270_1446 (size=12731) 2024-11-27T16:22:52,806 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/cfdf879a24d44ce6a3082350b2823a8d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/cfdf879a24d44ce6a3082350b2823a8d 2024-11-27T16:22:52,812 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/B of a4d3cff76c8a3133e00d8e6d8859dc7d into cfdf879a24d44ce6a3082350b2823a8d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
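The compaction of store B above was queued by the memstore flusher, but the same flush and compaction activity can also be driven explicitly through the Admin API. A minimal sketch follows, assuming the TestAcidGuarantees table from this log; admin.flush corresponds to the FlushTableProcedure/FlushRegionProcedure pair (pid=110/111) recorded here, and majorCompact to the compaction runs.

// Minimal sketch of driving the same operations administratively.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(table);        // same effect as the flush procedure above
      admin.majorCompact(table); // compaction requests are asynchronous
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);       // poll until the region server reports compaction done
      }
    }
  }
}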
2024-11-27T16:22:52,812 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:52,812 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/B, priority=12, startTime=1732724572705; duration=0sec 2024-11-27T16:22:52,812 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:52,812 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:B 2024-11-27T16:22:52,812 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:52,814 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:52,814 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/C is initiating minor compaction (all files) 2024-11-27T16:22:52,814 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/C in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:52,814 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0ab8ef2522fa44919214cb2cbb400b61, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/1584fee575254e56a6107a3adb2db508, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/5e43faab3a084427a7c41eb43f4872e5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/d5183e963374479b9f43913b1fb1cb04] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=47.9 K 2024-11-27T16:22:52,815 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ab8ef2522fa44919214cb2cbb400b61, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732724569323 2024-11-27T16:22:52,815 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1584fee575254e56a6107a3adb2db508, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732724569468 2024-11-27T16:22:52,815 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e43faab3a084427a7c41eb43f4872e5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=235, earliestPutTs=1732724570687 2024-11-27T16:22:52,816 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d5183e963374479b9f43913b1fb1cb04, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732724571056 2024-11-27T16:22:52,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742271_1447 (size=4469) 2024-11-27T16:22:52,823 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#A#compaction#376 average throughput is 0.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:52,823 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/275fa9bd383f4d44b2ad7925683a3eae is 175, key is test_row_0/A:col10/1732724571086/Put/seqid=0 2024-11-27T16:22:52,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742272_1448 (size=12454) 2024-11-27T16:22:52,853 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#C#compaction#378 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:52,854 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/0fc9384662a2483889d7e4d03dec6be9 is 50, key is test_row_0/C:col10/1732724571086/Put/seqid=0 2024-11-27T16:22:52,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742273_1449 (size=31685) 2024-11-27T16:22:52,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742274_1450 (size=12731) 2024-11-27T16:22:52,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-27T16:22:53,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:53,239 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112702f5f881dba84dec8357c2390047b1a9_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112702f5f881dba84dec8357c2390047b1a9_a4d3cff76c8a3133e00d8e6d8859dc7d 
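The mobdir rename above is the flush path taken for a MOB-enabled column family. The sketch below shows how such a family is typically declared; the threshold value is an assumption, since this log only shows that family A of the table writes MOB files under /mobdir.

// Sketch of a MOB-enabled family declaration (threshold is illustrative only).
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static TableDescriptor mobTable() {
    ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)   // values above the threshold go to separate MOB hfiles
        .setMobThreshold(100)  // bytes; assumed value for illustration
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(familyA)
        .build();
  }
}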
2024-11-27T16:22:53,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/b86a1df4c3c14543a9af2dfd8f5a63e7, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d]
2024-11-27T16:22:53,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/b86a1df4c3c14543a9af2dfd8f5a63e7 is 175, key is test_row_0/A:col10/1732724571269/Put/seqid=0
2024-11-27T16:22:53,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742275_1451 (size=31255)
2024-11-27T16:22:53,288 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/275fa9bd383f4d44b2ad7925683a3eae as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/275fa9bd383f4d44b2ad7925683a3eae
2024-11-27T16:22:53,294 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/A of a4d3cff76c8a3133e00d8e6d8859dc7d into 275fa9bd383f4d44b2ad7925683a3eae(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute.
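With the flush committed and store A compacted, the invariant this test exercises is that a reader never observes a row whose families disagree, even while flushes and compactions race with writers. A simplified reader-side check is sketched below, assuming the row and qualifier names visible in the log; it is not the actual TestAcidGuarantees code.

// Simplified check of the multi-family atomicity invariant (writers are assumed to
// update families A, B and C together in a single Put, as the test's writers do).
import java.util.Arrays;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicRowCheck {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Result r = table.get(new Get(Bytes.toBytes("test_row_0")));
      byte[] a = r.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"));
      byte[] b = r.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
      byte[] c = r.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"));
      // A single Get sees one snapshot of the row, so all families must carry the same
      // value if every writer mutates A, B and C in one atomic Put.
      if (!Arrays.equals(a, b) || !Arrays.equals(b, c)) {
        throw new IllegalStateException("atomicity violated for test_row_0");
      }
    }
  }
}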
2024-11-27T16:22:53,294 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:53,294 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/A, priority=12, startTime=1732724572705; duration=0sec 2024-11-27T16:22:53,294 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:53,294 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:A 2024-11-27T16:22:53,329 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/0fc9384662a2483889d7e4d03dec6be9 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0fc9384662a2483889d7e4d03dec6be9 2024-11-27T16:22:53,333 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/C of a4d3cff76c8a3133e00d8e6d8859dc7d into 0fc9384662a2483889d7e4d03dec6be9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:53,333 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:53,333 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/C, priority=12, startTime=1732724572705; duration=0sec 2024-11-27T16:22:53,333 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:53,333 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:C 2024-11-27T16:22:53,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:53,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:53,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724633492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724633492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724633493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724633494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724633604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724633604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724633604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724633605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,670 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=274, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/b86a1df4c3c14543a9af2dfd8f5a63e7 2024-11-27T16:22:53,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/66cc98dbf0494fd0a9569800b2f95a25 is 50, key is test_row_0/B:col10/1732724571269/Put/seqid=0 2024-11-27T16:22:53,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742276_1452 (size=12301) 2024-11-27T16:22:53,706 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/66cc98dbf0494fd0a9569800b2f95a25 2024-11-27T16:22:53,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/65433dd7583c49c4b6332d42adb0b55e is 50, key is 
test_row_0/C:col10/1732724571269/Put/seqid=0 2024-11-27T16:22:53,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742277_1453 (size=12301) 2024-11-27T16:22:53,750 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/65433dd7583c49c4b6332d42adb0b55e 2024-11-27T16:22:53,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/b86a1df4c3c14543a9af2dfd8f5a63e7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b86a1df4c3c14543a9af2dfd8f5a63e7 2024-11-27T16:22:53,766 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b86a1df4c3c14543a9af2dfd8f5a63e7, entries=150, sequenceid=274, filesize=30.5 K 2024-11-27T16:22:53,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/66cc98dbf0494fd0a9569800b2f95a25 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/66cc98dbf0494fd0a9569800b2f95a25 2024-11-27T16:22:53,772 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/66cc98dbf0494fd0a9569800b2f95a25, entries=150, sequenceid=274, filesize=12.0 K 2024-11-27T16:22:53,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/65433dd7583c49c4b6332d42adb0b55e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/65433dd7583c49c4b6332d42adb0b55e 2024-11-27T16:22:53,781 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/65433dd7583c49c4b6332d42adb0b55e, entries=150, sequenceid=274, filesize=12.0 K 2024-11-27T16:22:53,787 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 
{event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for a4d3cff76c8a3133e00d8e6d8859dc7d in 1046ms, sequenceid=274, compaction requested=false 2024-11-27T16:22:53,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:53,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:53,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-27T16:22:53,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-27T16:22:53,792 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-27T16:22:53,792 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9680 sec 2024-11-27T16:22:53,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.9730 sec 2024-11-27T16:22:53,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:53,816 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-27T16:22:53,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:53,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:53,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:53,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:53,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:53,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:53,837 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275e0c6dcb48a942cb9df26b37d3172753_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724573489/Put/seqid=0 2024-11-27T16:22:53,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724633847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724633855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724633856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724633857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742278_1454 (size=14994) 2024-11-27T16:22:53,882 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:53,886 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275e0c6dcb48a942cb9df26b37d3172753_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275e0c6dcb48a942cb9df26b37d3172753_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:53,887 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/56edad44d3934d75b9b896bca88f5221, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:53,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/56edad44d3934d75b9b896bca88f5221 is 175, key is test_row_0/A:col10/1732724573489/Put/seqid=0 2024-11-27T16:22:53,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-27T16:22:53,928 INFO [Thread-1701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-27T16:22:53,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to 
blk_1073742279_1455 (size=39949) 2024-11-27T16:22:53,930 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:53,930 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=295, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/56edad44d3934d75b9b896bca88f5221 2024-11-27T16:22:53,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-27T16:22:53,931 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:53,932 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:53,932 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:53,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-27T16:22:53,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/448b79929c21406bb360fe3b3f0648ba is 50, key is test_row_0/B:col10/1732724573489/Put/seqid=0 2024-11-27T16:22:53,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724633964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724633966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724633967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742280_1456 (size=12301) 2024-11-27T16:22:53,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:53,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724633972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:53,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/448b79929c21406bb360fe3b3f0648ba 2024-11-27T16:22:54,006 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/f92db1d2314245958ee03195ffa86f35 is 50, key is test_row_0/C:col10/1732724573489/Put/seqid=0 2024-11-27T16:22:54,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-27T16:22:54,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742281_1457 (size=12301) 2024-11-27T16:22:54,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/f92db1d2314245958ee03195ffa86f35 2024-11-27T16:22:54,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/56edad44d3934d75b9b896bca88f5221 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/56edad44d3934d75b9b896bca88f5221 2024-11-27T16:22:54,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/56edad44d3934d75b9b896bca88f5221, entries=200, sequenceid=295, filesize=39.0 K 2024-11-27T16:22:54,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/448b79929c21406bb360fe3b3f0648ba as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/448b79929c21406bb360fe3b3f0648ba 2024-11-27T16:22:54,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/448b79929c21406bb360fe3b3f0648ba, entries=150, sequenceid=295, filesize=12.0 K 2024-11-27T16:22:54,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/f92db1d2314245958ee03195ffa86f35 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/f92db1d2314245958ee03195ffa86f35 2024-11-27T16:22:54,088 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-27T16:22:54,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:54,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:54,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:54,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:54,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:54,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:54,092 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/f92db1d2314245958ee03195ffa86f35, entries=150, sequenceid=295, filesize=12.0 K 2024-11-27T16:22:54,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for a4d3cff76c8a3133e00d8e6d8859dc7d in 277ms, sequenceid=295, compaction requested=true 2024-11-27T16:22:54,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:54,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:54,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:54,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:54,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:54,093 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:54,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:54,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:22:54,094 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:54,095 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102889 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:54,096 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/A is initiating minor compaction (all files) 2024-11-27T16:22:54,096 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/A in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:54,096 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/275fa9bd383f4d44b2ad7925683a3eae, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b86a1df4c3c14543a9af2dfd8f5a63e7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/56edad44d3934d75b9b896bca88f5221] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=100.5 K 2024-11-27T16:22:54,096 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:54,096 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/275fa9bd383f4d44b2ad7925683a3eae, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b86a1df4c3c14543a9af2dfd8f5a63e7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/56edad44d3934d75b9b896bca88f5221] 2024-11-27T16:22:54,097 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:54,097 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/B is initiating minor compaction (all files) 2024-11-27T16:22:54,097 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/B in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:54,097 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/cfdf879a24d44ce6a3082350b2823a8d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/66cc98dbf0494fd0a9569800b2f95a25, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/448b79929c21406bb360fe3b3f0648ba] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=36.5 K 2024-11-27T16:22:54,097 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 275fa9bd383f4d44b2ad7925683a3eae, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732724571056 2024-11-27T16:22:54,098 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting cfdf879a24d44ce6a3082350b2823a8d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732724571056 2024-11-27T16:22:54,098 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b86a1df4c3c14543a9af2dfd8f5a63e7, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732724571269 2024-11-27T16:22:54,098 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 66cc98dbf0494fd0a9569800b2f95a25, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732724571269 2024-11-27T16:22:54,098 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56edad44d3934d75b9b896bca88f5221, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732724573489 2024-11-27T16:22:54,099 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 448b79929c21406bb360fe3b3f0648ba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732724573489 2024-11-27T16:22:54,113 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#B#compaction#384 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:54,114 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/d4407a7c02dc40a6802d31eeedde5e0f is 50, key is test_row_0/B:col10/1732724573489/Put/seqid=0 2024-11-27T16:22:54,135 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:54,163 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127390423c439834acb9766c5ba064bce26_a4d3cff76c8a3133e00d8e6d8859dc7d store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:54,165 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127390423c439834acb9766c5ba064bce26_a4d3cff76c8a3133e00d8e6d8859dc7d, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:54,165 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127390423c439834acb9766c5ba064bce26_a4d3cff76c8a3133e00d8e6d8859dc7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:54,186 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-27T16:22:54,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:54,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:54,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:54,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:54,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:54,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:54,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742282_1458 (size=12983) 2024-11-27T16:22:54,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:54,194 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/d4407a7c02dc40a6802d31eeedde5e0f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d4407a7c02dc40a6802d31eeedde5e0f 2024-11-27T16:22:54,201 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/B of a4d3cff76c8a3133e00d8e6d8859dc7d into d4407a7c02dc40a6802d31eeedde5e0f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:54,201 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:54,201 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/B, priority=13, startTime=1732724574093; duration=0sec 2024-11-27T16:22:54,202 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:54,202 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:B 2024-11-27T16:22:54,202 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:22:54,203 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:22:54,203 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/C is initiating minor compaction (all files) 2024-11-27T16:22:54,203 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/C in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:54,203 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0fc9384662a2483889d7e4d03dec6be9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/65433dd7583c49c4b6332d42adb0b55e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/f92db1d2314245958ee03195ffa86f35] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=36.5 K 2024-11-27T16:22:54,203 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fc9384662a2483889d7e4d03dec6be9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732724571056 2024-11-27T16:22:54,204 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 65433dd7583c49c4b6332d42adb0b55e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732724571269 2024-11-27T16:22:54,204 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f92db1d2314245958ee03195ffa86f35, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732724573489 2024-11-27T16:22:54,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411279340f99b22dc455aa00d3015e353a328_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724573843/Put/seqid=0 2024-11-27T16:22:54,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742283_1459 (size=4469) 2024-11-27T16:22:54,225 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#A#compaction#385 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:54,226 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/40a8db297abe4a6588177986463477bc is 175, key is test_row_0/A:col10/1732724573489/Put/seqid=0 2024-11-27T16:22:54,236 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#C#compaction#387 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-11-27T16:22:54,237 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/87fa51a57beb4b9191ae4c298b59a57c is 50, key is test_row_0/C:col10/1732724573489/Put/seqid=0 2024-11-27T16:22:54,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-27T16:22:54,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742284_1460 (size=14994) 2024-11-27T16:22:54,241 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,242 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:54,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-27T16:22:54,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:54,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:54,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:54,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:54,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:54,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:54,246 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411279340f99b22dc455aa00d3015e353a328_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279340f99b22dc455aa00d3015e353a328_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:54,247 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/2710ac3c931249fabb04794ec1ba9505, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:54,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/2710ac3c931249fabb04794ec1ba9505 is 175, key is test_row_0/A:col10/1732724573843/Put/seqid=0 2024-11-27T16:22:54,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724634245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724634247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724634256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724634265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742286_1462 (size=12983) 2024-11-27T16:22:54,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742287_1463 (size=39949) 2024-11-27T16:22:54,313 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=315, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/2710ac3c931249fabb04794ec1ba9505 2024-11-27T16:22:54,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742285_1461 (size=31937) 2024-11-27T16:22:54,331 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/40a8db297abe4a6588177986463477bc as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/40a8db297abe4a6588177986463477bc 2024-11-27T16:22:54,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/d1ac218aa08245d2933a752f83221c85 is 50, key is test_row_0/B:col10/1732724573843/Put/seqid=0 2024-11-27T16:22:54,336 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/A of a4d3cff76c8a3133e00d8e6d8859dc7d into 40a8db297abe4a6588177986463477bc(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:22:54,336 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:54,336 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/A, priority=13, startTime=1732724574093; duration=0sec 2024-11-27T16:22:54,336 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:54,336 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:A 2024-11-27T16:22:54,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742288_1464 (size=12301) 2024-11-27T16:22:54,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/d1ac218aa08245d2933a752f83221c85 2024-11-27T16:22:54,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/0b0f84fecd5845e49222594188f0d029 is 50, key is test_row_0/C:col10/1732724573843/Put/seqid=0 2024-11-27T16:22:54,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724634368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724634368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742289_1465 (size=12301) 2024-11-27T16:22:54,388 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/0b0f84fecd5845e49222594188f0d029 2024-11-27T16:22:54,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724634368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,394 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/2710ac3c931249fabb04794ec1ba9505 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/2710ac3c931249fabb04794ec1ba9505 2024-11-27T16:22:54,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-27T16:22:54,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:54,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:54,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:54,395 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:54,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:54,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:54,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724634383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,404 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/2710ac3c931249fabb04794ec1ba9505, entries=200, sequenceid=315, filesize=39.0 K 2024-11-27T16:22:54,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/d1ac218aa08245d2933a752f83221c85 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d1ac218aa08245d2933a752f83221c85 2024-11-27T16:22:54,410 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d1ac218aa08245d2933a752f83221c85, entries=150, sequenceid=315, filesize=12.0 K 2024-11-27T16:22:54,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/0b0f84fecd5845e49222594188f0d029 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0b0f84fecd5845e49222594188f0d029 2024-11-27T16:22:54,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0b0f84fecd5845e49222594188f0d029, entries=150, sequenceid=315, filesize=12.0 K 2024-11-27T16:22:54,416 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for a4d3cff76c8a3133e00d8e6d8859dc7d in 230ms, sequenceid=315, compaction requested=false 2024-11-27T16:22:54,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:54,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-27T16:22:54,548 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-27T16:22:54,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:54,549 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-27T16:22:54,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:54,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:54,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:54,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:54,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:54,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:54,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a2ed29ca450343e0af994d14fd495e0f_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724574263/Put/seqid=0 2024-11-27T16:22:54,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:54,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:54,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742290_1466 (size=12454) 2024-11-27T16:22:54,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724634677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724634677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724634679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724634679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,717 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/87fa51a57beb4b9191ae4c298b59a57c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/87fa51a57beb4b9191ae4c298b59a57c 2024-11-27T16:22:54,724 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/C of a4d3cff76c8a3133e00d8e6d8859dc7d into 87fa51a57beb4b9191ae4c298b59a57c(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:54,724 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:54,726 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/C, priority=13, startTime=1732724574093; duration=0sec 2024-11-27T16:22:54,726 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:54,726 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:C 2024-11-27T16:22:54,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724634789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724634789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724634790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:54,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:54,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724634791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724634996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724634996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724634997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724635000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:55,037 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a2ed29ca450343e0af994d14fd495e0f_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a2ed29ca450343e0af994d14fd495e0f_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:55,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-27T16:22:55,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/59389b101bf04000b7c674048c9caf6b, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:55,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/59389b101bf04000b7c674048c9caf6b is 175, key is test_row_0/A:col10/1732724574263/Put/seqid=0 2024-11-27T16:22:55,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742291_1467 (size=31255) 2024-11-27T16:22:55,102 INFO 
[RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=333, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/59389b101bf04000b7c674048c9caf6b 2024-11-27T16:22:55,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/1bcd2d5a50284b05a76021f53188612a is 50, key is test_row_0/B:col10/1732724574263/Put/seqid=0 2024-11-27T16:22:55,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742292_1468 (size=12301) 2024-11-27T16:22:55,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724635306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724635306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724635306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724635308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,315 DEBUG [regionserver/7b191dec6496:0.Chore.1 {}] throttle.PressureAwareCompactionThroughputController(103): CompactionPressure is 0.0, tune throughput to 50.00 MB/second 2024-11-27T16:22:55,326 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/2d050878dd394b9e8a641bddca003329, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/59677fd4c6f14020ac185d8413f48aab, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/72b30d8d4d1449bf9362760aa80d3b21, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/56af902700004c05bcb5516401f36fb1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1a8deb7377194e28a2fb426e3549606a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b01c5c52b34f431298ec34d09c4e13df, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/104288c612004449bc800085c9f052fe, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/ebb5179562b84a52aa6739404cba4e5c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/50b5000659b64a85bfc1a87ba5aa9fa0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b3c9ee8c7d3a4c7281c43b290b778477, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1e0b6ee33ee346218501fe98e762c26d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/26dceeee41364cb3bfcd523ad31f89b7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/581b591148c346c4b78806b85ba257b2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/15fcda054bdb4ca6bd4a9868610805f6, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/9c9f94791b7e4cc3afbe109e44a23853, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/9c1f4deac323410bbc34f1a3d0740299, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/275fa9bd383f4d44b2ad7925683a3eae, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/a7764fd5d40343c6b6640a0a3d2b9444, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b86a1df4c3c14543a9af2dfd8f5a63e7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/56edad44d3934d75b9b896bca88f5221] to archive 2024-11-27T16:22:55,330 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
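The discharger entries above record each compacted store file for family A being renamed out of the region's data tree into the parallel archive tree (data/default/TestAcidGuarantees/<region>/A/<file> becomes archive/data/default/TestAcidGuarantees/<region>/A/<file>). Below is a minimal sketch of that same move using plain Hadoop FileSystem calls, not HBase's own HFileArchiver; the root, region and store-file names are placeholders, not values from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch: mirrors the data/ -> archive/data/ move that the
// "Archived from FileableStoreFile" entries above record. ROOT, REGION and
// STOREFILE are hypothetical placeholders.
public class ArchiveMoveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path root = new Path("hdfs://localhost:34065/user/jenkins/test-data/ROOT");
    Path storeFile = new Path(root,
        "data/default/TestAcidGuarantees/REGION/A/STOREFILE");
    // The archive location keeps the same namespace/table/region/family suffix,
    // just rooted under archive/ instead of data/.
    Path archived = new Path(root,
        "archive/data/default/TestAcidGuarantees/REGION/A/STOREFILE");

    FileSystem fs = storeFile.getFileSystem(conf);
    fs.mkdirs(archived.getParent());         // make sure the archive family dir exists
    if (!fs.rename(storeFile, archived)) {   // a metadata-only move on HDFS
      throw new java.io.IOException("archive rename failed for " + storeFile);
    }
  }
}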
2024-11-27T16:22:55,336 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/2d050878dd394b9e8a641bddca003329 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/2d050878dd394b9e8a641bddca003329 2024-11-27T16:22:55,337 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/59677fd4c6f14020ac185d8413f48aab to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/59677fd4c6f14020ac185d8413f48aab 2024-11-27T16:22:55,339 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/72b30d8d4d1449bf9362760aa80d3b21 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/72b30d8d4d1449bf9362760aa80d3b21 2024-11-27T16:22:55,341 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/56af902700004c05bcb5516401f36fb1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/56af902700004c05bcb5516401f36fb1 2024-11-27T16:22:55,342 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1a8deb7377194e28a2fb426e3549606a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1a8deb7377194e28a2fb426e3549606a 2024-11-27T16:22:55,344 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b01c5c52b34f431298ec34d09c4e13df to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b01c5c52b34f431298ec34d09c4e13df 2024-11-27T16:22:55,345 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/104288c612004449bc800085c9f052fe to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/104288c612004449bc800085c9f052fe 2024-11-27T16:22:55,346 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/ebb5179562b84a52aa6739404cba4e5c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/ebb5179562b84a52aa6739404cba4e5c 2024-11-27T16:22:55,348 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/50b5000659b64a85bfc1a87ba5aa9fa0 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/50b5000659b64a85bfc1a87ba5aa9fa0 2024-11-27T16:22:55,350 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b3c9ee8c7d3a4c7281c43b290b778477 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b3c9ee8c7d3a4c7281c43b290b778477 2024-11-27T16:22:55,352 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1e0b6ee33ee346218501fe98e762c26d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1e0b6ee33ee346218501fe98e762c26d 2024-11-27T16:22:55,354 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/26dceeee41364cb3bfcd523ad31f89b7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/26dceeee41364cb3bfcd523ad31f89b7 2024-11-27T16:22:55,358 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/581b591148c346c4b78806b85ba257b2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/581b591148c346c4b78806b85ba257b2 2024-11-27T16:22:55,359 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/15fcda054bdb4ca6bd4a9868610805f6 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/15fcda054bdb4ca6bd4a9868610805f6 2024-11-27T16:22:55,361 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/9c9f94791b7e4cc3afbe109e44a23853 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/9c9f94791b7e4cc3afbe109e44a23853 2024-11-27T16:22:55,362 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/9c1f4deac323410bbc34f1a3d0740299 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/9c1f4deac323410bbc34f1a3d0740299 2024-11-27T16:22:55,364 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/275fa9bd383f4d44b2ad7925683a3eae to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/275fa9bd383f4d44b2ad7925683a3eae 2024-11-27T16:22:55,365 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/a7764fd5d40343c6b6640a0a3d2b9444 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/a7764fd5d40343c6b6640a0a3d2b9444 2024-11-27T16:22:55,367 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b86a1df4c3c14543a9af2dfd8f5a63e7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b86a1df4c3c14543a9af2dfd8f5a63e7 2024-11-27T16:22:55,369 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/56edad44d3934d75b9b896bca88f5221 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/56edad44d3934d75b9b896bca88f5221 2024-11-27T16:22:55,375 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/a6e41b72aab143e08917e16b4b24f55d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/527027039be241eeb55778bb597678c5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/193da816a3d44c58bd88e88cfa947f9f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/0cfb2362836d4a918cc402554c49f548, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/f4ce771570cc4e288aa047d8eefa57d7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/45a4a193933349e490daeb751c058285, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/18beb8a570f64a908a850c8879e83035, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b05fbb5180354e329fc44ab8076381a4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/0866c0cff70b4399be0b52cc22ce49ab, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/43b2c9ece0d74331919bb0d42d21f530, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/085dc0d27a304af0a020c7d435233934, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/88e382f00363461da9f322631a2f6e54, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b72723bc276c48649a86dff9a35aa36d, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/7b0d0a00fa1b4e21aa405031c727c90e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/e525543283ac4447b311710db8760848, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d3ed781addc644b3af6dcfbb991806b2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/cfdf879a24d44ce6a3082350b2823a8d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/aa8b3d8d19564defa6267ae99a625d20, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/66cc98dbf0494fd0a9569800b2f95a25, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/448b79929c21406bb360fe3b3f0648ba] to archive 2024-11-27T16:22:55,377 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T16:22:55,381 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/a6e41b72aab143e08917e16b4b24f55d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/a6e41b72aab143e08917e16b4b24f55d 2024-11-27T16:22:55,382 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/527027039be241eeb55778bb597678c5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/527027039be241eeb55778bb597678c5 2024-11-27T16:22:55,386 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/193da816a3d44c58bd88e88cfa947f9f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/193da816a3d44c58bd88e88cfa947f9f 2024-11-27T16:22:55,387 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/0cfb2362836d4a918cc402554c49f548 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/0cfb2362836d4a918cc402554c49f548 2024-11-27T16:22:55,393 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/f4ce771570cc4e288aa047d8eefa57d7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/f4ce771570cc4e288aa047d8eefa57d7 2024-11-27T16:22:55,394 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/45a4a193933349e490daeb751c058285 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/45a4a193933349e490daeb751c058285 2024-11-27T16:22:55,396 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/18beb8a570f64a908a850c8879e83035 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/18beb8a570f64a908a850c8879e83035 2024-11-27T16:22:55,397 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b05fbb5180354e329fc44ab8076381a4 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b05fbb5180354e329fc44ab8076381a4 2024-11-27T16:22:55,400 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/0866c0cff70b4399be0b52cc22ce49ab to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/0866c0cff70b4399be0b52cc22ce49ab 2024-11-27T16:22:55,403 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/43b2c9ece0d74331919bb0d42d21f530 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/43b2c9ece0d74331919bb0d42d21f530 2024-11-27T16:22:55,405 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/085dc0d27a304af0a020c7d435233934 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/085dc0d27a304af0a020c7d435233934 2024-11-27T16:22:55,406 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/88e382f00363461da9f322631a2f6e54 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/88e382f00363461da9f322631a2f6e54 2024-11-27T16:22:55,410 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b72723bc276c48649a86dff9a35aa36d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b72723bc276c48649a86dff9a35aa36d 2024-11-27T16:22:55,411 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/7b0d0a00fa1b4e21aa405031c727c90e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/7b0d0a00fa1b4e21aa405031c727c90e 2024-11-27T16:22:55,414 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/e525543283ac4447b311710db8760848 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/e525543283ac4447b311710db8760848 2024-11-27T16:22:55,417 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d3ed781addc644b3af6dcfbb991806b2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d3ed781addc644b3af6dcfbb991806b2 2024-11-27T16:22:55,420 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/cfdf879a24d44ce6a3082350b2823a8d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/cfdf879a24d44ce6a3082350b2823a8d 2024-11-27T16:22:55,423 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/aa8b3d8d19564defa6267ae99a625d20 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/aa8b3d8d19564defa6267ae99a625d20 2024-11-27T16:22:55,425 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/66cc98dbf0494fd0a9569800b2f95a25 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/66cc98dbf0494fd0a9569800b2f95a25 2024-11-27T16:22:55,427 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/448b79929c21406bb360fe3b3f0648ba to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/448b79929c21406bb360fe3b3f0648ba 2024-11-27T16:22:55,435 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/9a4aee30282444089190ef8701f6d740, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/74990d0aa6034ec6b80d8011b8a654ad, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7edda95080ab4e218a71dbabbcd39787, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/07a4349ac0b04da681563c633f801a4a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/baf4d3d663f14f2bba64083142582e86, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7d894d5d75e642bd80d672d695c631c9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/511d8adf98be44ceafaed61a8f9ba971, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/1fcd8c4dbe594a8395e14cc621fe0e65, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7fb29b49f56845f5a88739f7cf9b626a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/04b08e33ca1e4f9385a751bb68810b35, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/29b8b7b8920647e6893a560e7654b195, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4736a38e288140ef964bd1ea89e87a9c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0ab8ef2522fa44919214cb2cbb400b61, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/f444345f6985499b8f10191689a2186f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/1584fee575254e56a6107a3adb2db508, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/5e43faab3a084427a7c41eb43f4872e5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0fc9384662a2483889d7e4d03dec6be9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/d5183e963374479b9f43913b1fb1cb04, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/65433dd7583c49c4b6332d42adb0b55e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/f92db1d2314245958ee03195ffa86f35] to archive 2024-11-27T16:22:55,436 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
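As the discharger works through the A, B and C families, every compacted store file ends up under that parallel archive/ layout. If one wanted to confirm the result of the moves logged above from a test or a one-off tool, a glob over the archive tree is enough; the sketch below is illustrative only, with the test-data root path as a placeholder and the family names taken from these entries.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative check of the archive layout recorded above: count archived
// store files per column family for the TestAcidGuarantees table.
public class CountArchivedFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical placeholder for the test-data root seen in the log.
    Path archiveTable = new Path(
        "hdfs://localhost:34065/user/jenkins/test-data/ROOT/archive/data/default/TestAcidGuarantees");
    FileSystem fs = archiveTable.getFileSystem(conf);
    for (String family : new String[] {"A", "B", "C"}) {
      // One glob per family: <region>/<family>/<storefile>
      FileStatus[] archived = fs.globStatus(new Path(archiveTable, "*/" + family + "/*"));
      int n = archived == null ? 0 : archived.length;
      System.out.println("family " + family + ": " + n + " archived store files");
    }
  }
}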
2024-11-27T16:22:55,438 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/9a4aee30282444089190ef8701f6d740 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/9a4aee30282444089190ef8701f6d740 2024-11-27T16:22:55,439 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/74990d0aa6034ec6b80d8011b8a654ad to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/74990d0aa6034ec6b80d8011b8a654ad 2024-11-27T16:22:55,442 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7edda95080ab4e218a71dbabbcd39787 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7edda95080ab4e218a71dbabbcd39787 2024-11-27T16:22:55,443 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/07a4349ac0b04da681563c633f801a4a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/07a4349ac0b04da681563c633f801a4a 2024-11-27T16:22:55,445 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/baf4d3d663f14f2bba64083142582e86 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/baf4d3d663f14f2bba64083142582e86 2024-11-27T16:22:55,446 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7d894d5d75e642bd80d672d695c631c9 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7d894d5d75e642bd80d672d695c631c9 2024-11-27T16:22:55,447 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/511d8adf98be44ceafaed61a8f9ba971 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/511d8adf98be44ceafaed61a8f9ba971 2024-11-27T16:22:55,450 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/1fcd8c4dbe594a8395e14cc621fe0e65 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/1fcd8c4dbe594a8395e14cc621fe0e65 2024-11-27T16:22:55,451 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7fb29b49f56845f5a88739f7cf9b626a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/7fb29b49f56845f5a88739f7cf9b626a 2024-11-27T16:22:55,454 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/04b08e33ca1e4f9385a751bb68810b35 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/04b08e33ca1e4f9385a751bb68810b35 2024-11-27T16:22:55,458 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/29b8b7b8920647e6893a560e7654b195 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/29b8b7b8920647e6893a560e7654b195 2024-11-27T16:22:55,459 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4736a38e288140ef964bd1ea89e87a9c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4736a38e288140ef964bd1ea89e87a9c 2024-11-27T16:22:55,461 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0ab8ef2522fa44919214cb2cbb400b61 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0ab8ef2522fa44919214cb2cbb400b61 2024-11-27T16:22:55,463 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/f444345f6985499b8f10191689a2186f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/f444345f6985499b8f10191689a2186f 2024-11-27T16:22:55,464 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/1584fee575254e56a6107a3adb2db508 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/1584fee575254e56a6107a3adb2db508 2024-11-27T16:22:55,468 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/5e43faab3a084427a7c41eb43f4872e5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/5e43faab3a084427a7c41eb43f4872e5 2024-11-27T16:22:55,470 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0fc9384662a2483889d7e4d03dec6be9 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0fc9384662a2483889d7e4d03dec6be9 2024-11-27T16:22:55,471 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/d5183e963374479b9f43913b1fb1cb04 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/d5183e963374479b9f43913b1fb1cb04 2024-11-27T16:22:55,473 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/65433dd7583c49c4b6332d42adb0b55e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/65433dd7583c49c4b6332d42adb0b55e 2024-11-27T16:22:55,474 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/7b191dec6496:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/f92db1d2314245958ee03195ffa86f35 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/f92db1d2314245958ee03195ffa86f35 2024-11-27T16:22:55,593 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/1bcd2d5a50284b05a76021f53188612a 2024-11-27T16:22:55,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/b5fc1186d0844b8e8617a2d4449fc857 is 50, key is test_row_0/C:col10/1732724574263/Put/seqid=0 2024-11-27T16:22:55,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742293_1469 (size=12301) 2024-11-27T16:22:55,661 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/b5fc1186d0844b8e8617a2d4449fc857 2024-11-27T16:22:55,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/59389b101bf04000b7c674048c9caf6b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/59389b101bf04000b7c674048c9caf6b 2024-11-27T16:22:55,670 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/59389b101bf04000b7c674048c9caf6b, entries=150, sequenceid=333, filesize=30.5 K 2024-11-27T16:22:55,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/1bcd2d5a50284b05a76021f53188612a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/1bcd2d5a50284b05a76021f53188612a 2024-11-27T16:22:55,674 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/1bcd2d5a50284b05a76021f53188612a, entries=150, sequenceid=333, filesize=12.0 K 2024-11-27T16:22:55,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/b5fc1186d0844b8e8617a2d4449fc857 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/b5fc1186d0844b8e8617a2d4449fc857 2024-11-27T16:22:55,678 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/b5fc1186d0844b8e8617a2d4449fc857, entries=150, sequenceid=333, filesize=12.0 K 2024-11-27T16:22:55,679 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for a4d3cff76c8a3133e00d8e6d8859dc7d in 1130ms, sequenceid=333, compaction requested=true 2024-11-27T16:22:55,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:55,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:55,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-27T16:22:55,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-27T16:22:55,683 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-27T16:22:55,683 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7490 sec 2024-11-27T16:22:55,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.7530 sec 2024-11-27T16:22:55,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:55,830 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-27T16:22:55,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:55,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:55,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:55,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:55,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:55,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:55,847 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127b96b45bb62b641548e1c1a208e0631e7_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724574631/Put/seqid=0 2024-11-27T16:22:55,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724635859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724635860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724635861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724635862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742294_1470 (size=14994) 2024-11-27T16:22:55,875 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:55,881 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127b96b45bb62b641548e1c1a208e0631e7_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b96b45bb62b641548e1c1a208e0631e7_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:55,885 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/f1fef015f4914f5a88d84ac03e657367, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:55,886 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/f1fef015f4914f5a88d84ac03e657367 is 175, key is test_row_0/A:col10/1732724574631/Put/seqid=0 2024-11-27T16:22:55,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742295_1471 (size=39949) 2024-11-27T16:22:55,937 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=356, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/f1fef015f4914f5a88d84ac03e657367 2024-11-27T16:22:55,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/b1ece5272a7e4d85b23ac53c86ffa2f9 is 50, key is 
test_row_0/B:col10/1732724574631/Put/seqid=0 2024-11-27T16:22:55,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724635970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724635970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724635973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:55,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:55,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724635973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742296_1472 (size=12301) 2024-11-27T16:22:56,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-27T16:22:56,044 INFO [Thread-1701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-27T16:22:56,045 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:22:56,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-27T16:22:56,047 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:22:56,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-27T16:22:56,056 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:22:56,057 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:22:56,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-27T16:22:56,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724636181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724636181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724636185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724636185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,208 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-27T16:22:56,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:56,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:56,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:56,209 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:56,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:56,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:56,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-27T16:22:56,361 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-27T16:22:56,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:56,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:56,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:56,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:56,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:56,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:56,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/b1ece5272a7e4d85b23ac53c86ffa2f9 2024-11-27T16:22:56,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/8836a870a28a48509a8bb49ba4c36ecb is 50, key is test_row_0/C:col10/1732724574631/Put/seqid=0 2024-11-27T16:22:56,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742297_1473 (size=12301) 2024-11-27T16:22:56,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/8836a870a28a48509a8bb49ba4c36ecb 2024-11-27T16:22:56,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/f1fef015f4914f5a88d84ac03e657367 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/f1fef015f4914f5a88d84ac03e657367 2024-11-27T16:22:56,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/f1fef015f4914f5a88d84ac03e657367, entries=200, sequenceid=356, filesize=39.0 K 2024-11-27T16:22:56,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/b1ece5272a7e4d85b23ac53c86ffa2f9 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b1ece5272a7e4d85b23ac53c86ffa2f9 2024-11-27T16:22:56,466 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b1ece5272a7e4d85b23ac53c86ffa2f9, entries=150, sequenceid=356, filesize=12.0 K 2024-11-27T16:22:56,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/8836a870a28a48509a8bb49ba4c36ecb as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/8836a870a28a48509a8bb49ba4c36ecb 2024-11-27T16:22:56,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/8836a870a28a48509a8bb49ba4c36ecb, entries=150, sequenceid=356, filesize=12.0 K 2024-11-27T16:22:56,472 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for a4d3cff76c8a3133e00d8e6d8859dc7d in 642ms, sequenceid=356, compaction requested=true 2024-11-27T16:22:56,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:56,473 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:56,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:56,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:56,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:56,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:56,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:56,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:56,473 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:56,474 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:56,474 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/A is initiating minor compaction (all files) 2024-11-27T16:22:56,474 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/A in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:56,474 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/40a8db297abe4a6588177986463477bc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/2710ac3c931249fabb04794ec1ba9505, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/59389b101bf04000b7c674048c9caf6b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/f1fef015f4914f5a88d84ac03e657367] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=139.7 K 2024-11-27T16:22:56,475 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:56,475 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/40a8db297abe4a6588177986463477bc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/2710ac3c931249fabb04794ec1ba9505, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/59389b101bf04000b7c674048c9caf6b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/f1fef015f4914f5a88d84ac03e657367] 2024-11-27T16:22:56,475 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:56,475 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/B is initiating minor compaction (all files) 2024-11-27T16:22:56,475 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/B in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:56,476 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d4407a7c02dc40a6802d31eeedde5e0f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d1ac218aa08245d2933a752f83221c85, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/1bcd2d5a50284b05a76021f53188612a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b1ece5272a7e4d85b23ac53c86ffa2f9] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=48.7 K 2024-11-27T16:22:56,476 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40a8db297abe4a6588177986463477bc, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732724573489 2024-11-27T16:22:56,476 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d4407a7c02dc40a6802d31eeedde5e0f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732724573489 2024-11-27T16:22:56,476 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2710ac3c931249fabb04794ec1ba9505, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732724573843 2024-11-27T16:22:56,477 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d1ac218aa08245d2933a752f83221c85, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732724573843 2024-11-27T16:22:56,477 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59389b101bf04000b7c674048c9caf6b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732724574228 2024-11-27T16:22:56,477 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1bcd2d5a50284b05a76021f53188612a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732724574228 2024-11-27T16:22:56,477 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1fef015f4914f5a88d84ac03e657367, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732724574631 2024-11-27T16:22:56,478 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b1ece5272a7e4d85b23ac53c86ffa2f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732724574631 2024-11-27T16:22:56,499 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:56,502 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#B#compaction#397 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:22:56,503 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/86f8b88263504085a35f86fb366003bc is 50, key is test_row_0/B:col10/1732724574631/Put/seqid=0 2024-11-27T16:22:56,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:56,504 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-27T16:22:56,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:56,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:56,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:56,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:56,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:56,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:56,513 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112722b23a78d4f04ef690e9f96a5e73a438_a4d3cff76c8a3133e00d8e6d8859dc7d store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:56,515 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-27T16:22:56,516 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112722b23a78d4f04ef690e9f96a5e73a438_a4d3cff76c8a3133e00d8e6d8859dc7d, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:56,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:56,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
as already flushing 2024-11-27T16:22:56,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:56,516 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112722b23a78d4f04ef690e9f96a5e73a438_a4d3cff76c8a3133e00d8e6d8859dc7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:56,516 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:56,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
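The FlushRegionCallable failures above (pid=115, "NOT flushing ... as already flushing") are the server side of an administrative flush racing with the MemStoreFlusher flush that is already running; as the later entries show, the master keeps re-dispatching the same procedure until it succeeds. A hedged sketch of the kind of client call that starts such a flush, using standard HBase client classes rather than anything taken from the TestAcidGuarantees source:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // The master turns this into a flush procedure; while the region server is
                // already flushing the region, the remote callable fails with
                // "Unable to complete flush" and the procedure is retried, which is the
                // repeating pid=115 pattern in this log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }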
2024-11-27T16:22:56,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:56,537 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112713f5a0ccaf064f819b13894b03c776ef_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724576502/Put/seqid=0 2024-11-27T16:22:56,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742298_1474 (size=12439) 2024-11-27T16:22:56,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742299_1475 (size=4469) 2024-11-27T16:22:56,541 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#A#compaction#396 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:22:56,541 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/ecf1a8ae169b421bb02e6651b0f51bd9 is 175, key is test_row_0/A:col10/1732724574631/Put/seqid=0 2024-11-27T16:22:56,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742301_1477 (size=31393) 2024-11-27T16:22:56,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742300_1476 (size=17534) 2024-11-27T16:22:56,552 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:56,554 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/ecf1a8ae169b421bb02e6651b0f51bd9 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/ecf1a8ae169b421bb02e6651b0f51bd9 2024-11-27T16:22:56,557 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112713f5a0ccaf064f819b13894b03c776ef_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112713f5a0ccaf064f819b13894b03c776ef_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:56,560 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/3a8a0a3495a1426da607443c0f25a194, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:56,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/3a8a0a3495a1426da607443c0f25a194 is 175, key is test_row_0/A:col10/1732724576502/Put/seqid=0 2024-11-27T16:22:56,563 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/A of a4d3cff76c8a3133e00d8e6d8859dc7d into ecf1a8ae169b421bb02e6651b0f51bd9(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
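The PressureAwareThroughputController lines ("total limit is 50.00 MB/second") show compaction I/O being throttled. A hedged configuration sketch, assuming the usual property names for the pressure-aware compaction throughput controller; the byte values are illustrative and were not read from this test's site configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Bounds between which the controller scales compaction throughput as
            // memstore/flush pressure changes; 50 MB/s matches the limit reported above.
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            System.out.println(conf.get("hbase.hstore.compaction.throughput.lower.bound"));
        }
    }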
2024-11-27T16:22:56,563 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:56,563 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/A, priority=12, startTime=1732724576472; duration=0sec 2024-11-27T16:22:56,563 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:56,563 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:A 2024-11-27T16:22:56,563 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:56,564 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:56,565 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/C is initiating minor compaction (all files) 2024-11-27T16:22:56,565 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/C in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:56,565 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/87fa51a57beb4b9191ae4c298b59a57c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0b0f84fecd5845e49222594188f0d029, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/b5fc1186d0844b8e8617a2d4449fc857, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/8836a870a28a48509a8bb49ba4c36ecb] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=48.7 K 2024-11-27T16:22:56,565 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87fa51a57beb4b9191ae4c298b59a57c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732724573489 2024-11-27T16:22:56,566 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b0f84fecd5845e49222594188f0d029, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732724573843 2024-11-27T16:22:56,566 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5fc1186d0844b8e8617a2d4449fc857, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732724574228 2024-11-27T16:22:56,568 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8836a870a28a48509a8bb49ba4c36ecb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732724574631 2024-11-27T16:22:56,583 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#C#compaction#399 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:22:56,583 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/391ed64a451e4fcca13615a70a8ecffe is 50, key is test_row_0/C:col10/1732724574631/Put/seqid=0 2024-11-27T16:22:56,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724636585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724636586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724636586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724636588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742302_1478 (size=48639) 2024-11-27T16:22:56,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742303_1479 (size=12439) 2024-11-27T16:22:56,628 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/391ed64a451e4fcca13615a70a8ecffe as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/391ed64a451e4fcca13615a70a8ecffe 2024-11-27T16:22:56,633 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/C of a4d3cff76c8a3133e00d8e6d8859dc7d into 391ed64a451e4fcca13615a70a8ecffe(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
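The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking threshold. A hedged arithmetic sketch of how such a threshold is typically derived; the 128 KB flush size and 4x multiplier are assumptions chosen only so that their product matches the 512 K limit reported in this test log:

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            long flushSize = 128L * 1024; // assumed hbase.hregion.memstore.flush.size for this test
            int blockMultiplier = 4;      // assumed hbase.hregion.memstore.block.multiplier
            long blockingLimit = flushSize * blockMultiplier;
            // Writes are rejected with RegionTooBusyException once the memstore passes this
            // limit, until the in-flight flush frees space again.
            System.out.println(blockingLimit / 1024.0 + " K"); // prints 512.0 K
        }
    }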
2024-11-27T16:22:56,633 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:56,633 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/C, priority=12, startTime=1732724576473; duration=0sec 2024-11-27T16:22:56,633 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:56,633 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:C 2024-11-27T16:22:56,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-27T16:22:56,669 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-27T16:22:56,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:56,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:56,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:56,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
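On the client side, the Mutate calls rejected with RegionTooBusyException (above and again in the entries that follow) are normally retried by the HBase client itself with backoff. The sketch below only makes that retry visible with an explicit loop; it is illustrative, the retry count and sleep times are arbitrary, and depending on client settings the busy-region error may arrive wrapped in a retries-exhausted exception rather than directly:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
        static void putWithRetry(Table table, Put put) throws Exception {
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (IOException e) {
                    // Covers RegionTooBusyException (possibly wrapped): the memstore is over
                    // its blocking limit, so back off and let the flush catch up.
                    Thread.sleep(100L * attempt);
                }
            }
            throw new IOException("region stayed too busy");
        }

        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // Row/family/qualifier follow the "test_row_0/A:col10" keys seen in this log.
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                putWithRetry(table, put);
            }
        }
    }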
2024-11-27T16:22:56,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:56,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:56,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724636702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724636702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724636703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724636702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,826 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,826 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-27T16:22:56,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:56,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:56,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:56,827 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:56,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:56,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:22:56,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724636916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724636916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724636916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:56,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724636917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,946 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/86f8b88263504085a35f86fb366003bc as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/86f8b88263504085a35f86fb366003bc 2024-11-27T16:22:56,951 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/B of a4d3cff76c8a3133e00d8e6d8859dc7d into 86f8b88263504085a35f86fb366003bc(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
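The "Committing .../.tmp/B/86f8b88263504085a35f86fb366003bc as .../B/86f8b88263504085a35f86fb366003bc" entry above is the commit step of the compaction: the finished HFile is moved from the region's .tmp directory into the column-family directory before being added to the store's live file list. A hedged sketch of that move as a plain filesystem rename (paths shortened from the HDFS paths in the log; this is not the HRegionFileSystem code itself):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitStoreFileSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf); // the region's filesystem (HDFS in this test)
            Path tmp = new Path("/hbase/data/default/TestAcidGuarantees/"
                + "a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/86f8b88263504085a35f86fb366003bc");
            Path dst = new Path("/hbase/data/default/TestAcidGuarantees/"
                + "a4d3cff76c8a3133e00d8e6d8859dc7d/B/86f8b88263504085a35f86fb366003bc");
            // The new store file only replaces the four compacted inputs once this rename succeeds.
            boolean committed = fs.rename(tmp, dst);
            System.out.println("committed=" + committed);
        }
    }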
2024-11-27T16:22:56,951 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:56,951 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/B, priority=12, startTime=1732724576473; duration=0sec 2024-11-27T16:22:56,951 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:56,951 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:B 2024-11-27T16:22:56,978 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:22:56,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-27T16:22:56,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:56,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing 2024-11-27T16:22:56,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:56,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:22:56,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115
java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:56,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=115
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:57,003 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=372, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/3a8a0a3495a1426da607443c0f25a194
2024-11-27T16:22:57,021 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/32ac565702934721bcd7cdda3a6e767b is 50, key is test_row_0/B:col10/1732724576502/Put/seqid=0
2024-11-27T16:22:57,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742304_1480 (size=12301)
2024-11-27T16:22:57,131 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-11-27T16:22:57,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.
2024-11-27T16:22:57,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing
2024-11-27T16:22:57,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.
2024-11-27T16:22:57,132 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115
java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:57,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115
java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:57,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=115
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:57,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-11-27T16:22:57,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:22:57,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724637222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:22:57,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724637224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:22:57,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724637225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:22:57,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724637225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,283 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-11-27T16:22:57,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.
2024-11-27T16:22:57,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing
2024-11-27T16:22:57,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.
2024-11-27T16:22:57,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115
java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:57,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115
java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:57,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=115
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:57,436 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-11-27T16:22:57,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.
2024-11-27T16:22:57,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing
2024-11-27T16:22:57,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.
2024-11-27T16:22:57,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115
java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:57,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115
java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:57,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=115
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:22:57,471 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/32ac565702934721bcd7cdda3a6e767b
2024-11-27T16:22:57,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/4dff3b3954ff4e36b8eda5163987f82a is 50, key is test_row_0/C:col10/1732724576502/Put/seqid=0
2024-11-27T16:22:57,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742305_1481 (size=12301)
2024-11-27T16:22:57,536 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/4dff3b3954ff4e36b8eda5163987f82a
2024-11-27T16:22:57,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/3a8a0a3495a1426da607443c0f25a194 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/3a8a0a3495a1426da607443c0f25a194
2024-11-27T16:22:57,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/3a8a0a3495a1426da607443c0f25a194, entries=250, sequenceid=372, filesize=47.5 K
2024-11-27T16:22:57,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/32ac565702934721bcd7cdda3a6e767b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/32ac565702934721bcd7cdda3a6e767b
2024-11-27T16:22:57,551 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/32ac565702934721bcd7cdda3a6e767b, entries=150, sequenceid=372, filesize=12.0 K
2024-11-27T16:22:57,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/4dff3b3954ff4e36b8eda5163987f82a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4dff3b3954ff4e36b8eda5163987f82a
2024-11-27T16:22:57,556 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4dff3b3954ff4e36b8eda5163987f82a, entries=150, sequenceid=372, filesize=12.0 K
2024-11-27T16:22:57,557 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for a4d3cff76c8a3133e00d8e6d8859dc7d in 1053ms, sequenceid=372, compaction requested=false
2024-11-27T16:22:57,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d:
2024-11-27T16:22:57,589 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-11-27T16:22:57,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.
2024-11-27T16:22:57,590 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB
2024-11-27T16:22:57,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A
2024-11-27T16:22:57,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:22:57,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B
2024-11-27T16:22:57,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:22:57,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C
2024-11-27T16:22:57,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:22:57,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112734f60f669947459eba573484fde8355b_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724576586/Put/seqid=0
2024-11-27T16:22:57,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742306_1482 (size=12454)
2024-11-27T16:22:57,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:22:57,695 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112734f60f669947459eba573484fde8355b_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112734f60f669947459eba573484fde8355b_a4d3cff76c8a3133e00d8e6d8859dc7d
2024-11-27T16:22:57,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/d35a463f34484e09896357002db8e1b6, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d]
2024-11-27T16:22:57,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/d35a463f34484e09896357002db8e1b6 is 175, key is test_row_0/A:col10/1732724576586/Put/seqid=0
2024-11-27T16:22:57,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742307_1483 (size=31255)
2024-11-27T16:22:57,718 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=395, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/d35a463f34484e09896357002db8e1b6
2024-11-27T16:22:57,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/62f7414e88b8434eb9a36f11b7e9d81c is 50, key is test_row_0/B:col10/1732724576586/Put/seqid=0
2024-11-27T16:22:57,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. as already flushing
2024-11-27T16:22:57,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d
2024-11-27T16:22:57,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742308_1484 (size=12301)
2024-11-27T16:22:57,770 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/62f7414e88b8434eb9a36f11b7e9d81c
2024-11-27T16:22:57,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:22:57,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724637769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:22:57,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724637770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:22:57,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724637780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:22:57,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724637782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/72b6968638cb4e0084e0a0ff3889d022 is 50, key is test_row_0/C:col10/1732724576586/Put/seqid=0
2024-11-27T16:22:57,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742309_1485 (size=12301)
2024-11-27T16:22:57,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:22:57,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724637882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:22:57,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:22:57,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724637884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724637884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:22:57,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:22:57,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724637885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967
2024-11-27T16:22:58,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:58,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33116 deadline: 1732724638094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:58,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:58,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33092 deadline: 1732724638095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:58,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:58,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33056 deadline: 1732724638095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:58,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:22:58,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33110 deadline: 1732724638097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 2024-11-27T16:22:58,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-27T16:22:58,251 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/72b6968638cb4e0084e0a0ff3889d022 2024-11-27T16:22:58,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/d35a463f34484e09896357002db8e1b6 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/d35a463f34484e09896357002db8e1b6 2024-11-27T16:22:58,266 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/d35a463f34484e09896357002db8e1b6, entries=150, sequenceid=395, filesize=30.5 K 2024-11-27T16:22:58,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/62f7414e88b8434eb9a36f11b7e9d81c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/62f7414e88b8434eb9a36f11b7e9d81c 2024-11-27T16:22:58,270 DEBUG [Thread-1708 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 
0x191ae36a to 127.0.0.1:51088 2024-11-27T16:22:58,270 DEBUG [Thread-1708 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:58,272 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/62f7414e88b8434eb9a36f11b7e9d81c, entries=150, sequenceid=395, filesize=12.0 K 2024-11-27T16:22:58,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/72b6968638cb4e0084e0a0ff3889d022 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/72b6968638cb4e0084e0a0ff3889d022 2024-11-27T16:22:58,276 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/72b6968638cb4e0084e0a0ff3889d022, entries=150, sequenceid=395, filesize=12.0 K 2024-11-27T16:22:58,277 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for a4d3cff76c8a3133e00d8e6d8859dc7d in 687ms, sequenceid=395, compaction requested=true 2024-11-27T16:22:58,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:58,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
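The repeated RegionTooBusyException entries above show the region server rejecting Mutate calls once the region's memstore passes its blocking limit (512.0 K in this run); callers are expected to back off and retry until the in-flight flush drains the memstore. The sketch below is a minimal, illustrative client-side retry loop for that situation, assuming a reachable cluster plus the TestAcidGuarantees table, family A, and row test_row_0 seen in this log; note that the stock HBase client already retries internally (governed by hbase.client.retries.number and hbase.client.pause), so whether the raw exception or a retries-exhausted wrapper reaches application code depends on those settings.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              // The server rejects this with RegionTooBusyException while the
              // region's memstore is over its blocking limit.
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 10) throw e;                 // give up after a bounded number of attempts
              Thread.sleep(backoffMs);                    // wait for the flush to drain the memstore
              backoffMs = Math.min(backoffMs * 2, 5000);  // exponential backoff, capped at 5 s
            }
          }
        }
      }
    }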
2024-11-27T16:22:58,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-27T16:22:58,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-27T16:22:58,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-27T16:22:58,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2220 sec 2024-11-27T16:22:58,281 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 2.2350 sec 2024-11-27T16:22:58,282 DEBUG [Thread-1710 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x133cc1f0 to 127.0.0.1:51088 2024-11-27T16:22:58,282 DEBUG [Thread-1710 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:58,282 DEBUG [Thread-1702 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b7324d5 to 127.0.0.1:51088 2024-11-27T16:22:58,282 DEBUG [Thread-1702 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:58,284 DEBUG [Thread-1706 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x114e6211 to 127.0.0.1:51088 2024-11-27T16:22:58,284 DEBUG [Thread-1706 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:58,291 DEBUG [Thread-1704 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d930fb1 to 127.0.0.1:51088 2024-11-27T16:22:58,291 DEBUG [Thread-1704 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:58,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:58,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-27T16:22:58,406 DEBUG [Thread-1691 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c8cc27b to 127.0.0.1:51088 2024-11-27T16:22:58,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:22:58,406 DEBUG [Thread-1691 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:58,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:58,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:22:58,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:58,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:22:58,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:22:58,410 DEBUG [Thread-1693 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x081cac4f to 127.0.0.1:51088 2024-11-27T16:22:58,410 DEBUG [Thread-1693 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
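The MemStoreFlusher entries above show the flush firing once the region holds roughly 94 KB across its three column families; the blocking limit that produced the RegionTooBusyException entries is the configured memstore flush size multiplied by the block multiplier (the small 512 K limit here comes from this test's own settings, not the defaults). A minimal sketch of adjusting those knobs programmatically follows, using the standard property names hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the values shown are the usual defaults and are illustrative, not the values used by this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreTuningExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Size at which a region's memstore is flushed to disk (default 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writes are blocked with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier, until the flush catches up.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
      }
    }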
2024-11-27T16:22:58,412 DEBUG [Thread-1695 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64a04d7a to 127.0.0.1:51088 2024-11-27T16:22:58,412 DEBUG [Thread-1695 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:58,414 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127afd01eed83f845d682763630620fe0a3_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724577768/Put/seqid=0 2024-11-27T16:22:58,416 DEBUG [Thread-1699 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d7912a0 to 127.0.0.1:51088 2024-11-27T16:22:58,416 DEBUG [Thread-1699 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:22:58,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742310_1486 (size=12454) 2024-11-27T16:22:58,420 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:22:58,423 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127afd01eed83f845d682763630620fe0a3_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127afd01eed83f845d682763630620fe0a3_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:22:58,424 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/fac63ed55191468c87f659811aac70ad, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:58,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/fac63ed55191468c87f659811aac70ad is 175, key is test_row_0/A:col10/1732724577768/Put/seqid=0 2024-11-27T16:22:58,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742311_1487 (size=31255) 2024-11-27T16:22:58,428 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=412, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/fac63ed55191468c87f659811aac70ad 2024-11-27T16:22:58,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/979e63a3f300426c8066a176c3afa73a is 50, key is test_row_0/B:col10/1732724577768/Put/seqid=0 2024-11-27T16:22:58,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46329 is added to blk_1073742312_1488 (size=12301) 2024-11-27T16:22:58,842 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/979e63a3f300426c8066a176c3afa73a 2024-11-27T16:22:58,850 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/4cce7e431f0449179298453830e5450e is 50, key is test_row_0/C:col10/1732724577768/Put/seqid=0 2024-11-27T16:22:58,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742313_1489 (size=12301) 2024-11-27T16:22:59,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/4cce7e431f0449179298453830e5450e 2024-11-27T16:22:59,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/fac63ed55191468c87f659811aac70ad as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/fac63ed55191468c87f659811aac70ad 2024-11-27T16:22:59,261 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/fac63ed55191468c87f659811aac70ad, entries=150, sequenceid=412, filesize=30.5 K 2024-11-27T16:22:59,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/979e63a3f300426c8066a176c3afa73a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/979e63a3f300426c8066a176c3afa73a 2024-11-27T16:22:59,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/979e63a3f300426c8066a176c3afa73a, entries=150, sequenceid=412, filesize=12.0 K 2024-11-27T16:22:59,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/4cce7e431f0449179298453830e5450e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4cce7e431f0449179298453830e5450e 2024-11-27T16:22:59,269 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4cce7e431f0449179298453830e5450e, entries=150, sequenceid=412, filesize=12.0 K 2024-11-27T16:22:59,270 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=20.13 KB/20610 for a4d3cff76c8a3133e00d8e6d8859dc7d in 864ms, sequenceid=412, compaction requested=true 2024-11-27T16:22:59,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:59,270 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:59,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:22:59,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:59,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:22:59,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:59,271 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:59,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a4d3cff76c8a3133e00d8e6d8859dc7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:22:59,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:59,272 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142542 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:59,272 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/A is initiating minor compaction (all files) 2024-11-27T16:22:59,272 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/A in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
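Family A in this table is MOB-enabled, which is why its flush goes through HMobStore and DefaultMobStoreFlusher and writes cell data into mobdir before committing a regular store file of reference cells (the 175-byte "biggest cell" for A versus 50 bytes for B and C reflects those references), whereas B and C are plain families. The sketch below shows, under stated assumptions, how such a table could be declared with the public API; the table name TestAcidGuaranteesMobDemo and the 10 KB threshold are hypothetical, only the family names A, B, C are taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName name = TableName.valueOf("TestAcidGuaranteesMobDemo"); // hypothetical name
          admin.createTable(TableDescriptorBuilder.newBuilder(name)
              // Values in family A above the threshold are stored as MOB files under
              // the mob directory and referenced from the regular store files.
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)
                  .setMobThreshold(10L * 1024)   // 10 KB threshold, illustrative only
                  .build())
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
              .build());
        }
      }
    }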
2024-11-27T16:22:59,272 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49342 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:59,272 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/ecf1a8ae169b421bb02e6651b0f51bd9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/3a8a0a3495a1426da607443c0f25a194, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/d35a463f34484e09896357002db8e1b6, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/fac63ed55191468c87f659811aac70ad] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=139.2 K 2024-11-27T16:22:59,272 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:22:59,272 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/B is initiating minor compaction (all files) 2024-11-27T16:22:59,272 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/ecf1a8ae169b421bb02e6651b0f51bd9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/3a8a0a3495a1426da607443c0f25a194, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/d35a463f34484e09896357002db8e1b6, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/fac63ed55191468c87f659811aac70ad] 2024-11-27T16:22:59,272 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/B in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:59,272 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/86f8b88263504085a35f86fb366003bc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/32ac565702934721bcd7cdda3a6e767b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/62f7414e88b8434eb9a36f11b7e9d81c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/979e63a3f300426c8066a176c3afa73a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=48.2 K 2024-11-27T16:22:59,273 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ecf1a8ae169b421bb02e6651b0f51bd9, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732724574631 2024-11-27T16:22:59,273 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 86f8b88263504085a35f86fb366003bc, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732724574631 2024-11-27T16:22:59,273 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a8a0a3495a1426da607443c0f25a194, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732724575847 2024-11-27T16:22:59,273 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 32ac565702934721bcd7cdda3a6e767b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732724575861 2024-11-27T16:22:59,273 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d35a463f34484e09896357002db8e1b6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732724576560 2024-11-27T16:22:59,273 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 62f7414e88b8434eb9a36f11b7e9d81c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732724576560 2024-11-27T16:22:59,273 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting fac63ed55191468c87f659811aac70ad, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1732724577768 2024-11-27T16:22:59,273 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 979e63a3f300426c8066a176c3afa73a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1732724577768 2024-11-27T16:22:59,281 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:59,284 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#B#compaction#409 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:22:59,284 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127a1575947023d4dcabed97e0cae6676dc_a4d3cff76c8a3133e00d8e6d8859dc7d store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:59,284 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/7bf723349b6645aaa13688a7432ffba5 is 50, key is test_row_0/B:col10/1732724577768/Put/seqid=0 2024-11-27T16:22:59,290 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127a1575947023d4dcabed97e0cae6676dc_a4d3cff76c8a3133e00d8e6d8859dc7d, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:59,290 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a1575947023d4dcabed97e0cae6676dc_a4d3cff76c8a3133e00d8e6d8859dc7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:22:59,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742315_1491 (size=4469) 2024-11-27T16:22:59,331 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#A#compaction#408 average throughput is 0.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:22:59,332 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/b4e47bd72aad49daa12027e71c60f4e9 is 175, key is test_row_0/A:col10/1732724577768/Put/seqid=0 2024-11-27T16:22:59,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742314_1490 (size=12575) 2024-11-27T16:22:59,351 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/7bf723349b6645aaa13688a7432ffba5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/7bf723349b6645aaa13688a7432ffba5 2024-11-27T16:22:59,356 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/B of a4d3cff76c8a3133e00d8e6d8859dc7d into 7bf723349b6645aaa13688a7432ffba5(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:59,356 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:59,356 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/B, priority=12, startTime=1732724579271; duration=0sec 2024-11-27T16:22:59,356 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:22:59,356 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:B 2024-11-27T16:22:59,357 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:22:59,358 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49342 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:22:59,358 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): a4d3cff76c8a3133e00d8e6d8859dc7d/C is initiating minor compaction (all files) 2024-11-27T16:22:59,358 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a4d3cff76c8a3133e00d8e6d8859dc7d/C in TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 
2024-11-27T16:22:59,358 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/391ed64a451e4fcca13615a70a8ecffe, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4dff3b3954ff4e36b8eda5163987f82a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/72b6968638cb4e0084e0a0ff3889d022, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4cce7e431f0449179298453830e5450e] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp, totalSize=48.2 K 2024-11-27T16:22:59,359 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 391ed64a451e4fcca13615a70a8ecffe, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732724574631 2024-11-27T16:22:59,359 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4dff3b3954ff4e36b8eda5163987f82a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732724575861 2024-11-27T16:22:59,360 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 72b6968638cb4e0084e0a0ff3889d022, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732724576560 2024-11-27T16:22:59,361 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4cce7e431f0449179298453830e5450e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1732724577768 2024-11-27T16:22:59,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742316_1492 (size=31529) 2024-11-27T16:22:59,386 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a4d3cff76c8a3133e00d8e6d8859dc7d#C#compaction#410 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:22:59,387 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/c296df847ffb411e8cbddb944a20c0e0 is 50, key is test_row_0/C:col10/1732724577768/Put/seqid=0 2024-11-27T16:22:59,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742317_1493 (size=12575) 2024-11-27T16:22:59,779 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/b4e47bd72aad49daa12027e71c60f4e9 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b4e47bd72aad49daa12027e71c60f4e9 2024-11-27T16:22:59,783 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/A of a4d3cff76c8a3133e00d8e6d8859dc7d into b4e47bd72aad49daa12027e71c60f4e9(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:22:59,783 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:59,783 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/A, priority=12, startTime=1732724579270; duration=0sec 2024-11-27T16:22:59,783 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:59,783 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:A 2024-11-27T16:22:59,797 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/c296df847ffb411e8cbddb944a20c0e0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/c296df847ffb411e8cbddb944a20c0e0 2024-11-27T16:22:59,802 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a4d3cff76c8a3133e00d8e6d8859dc7d/C of a4d3cff76c8a3133e00d8e6d8859dc7d into c296df847ffb411e8cbddb944a20c0e0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
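After the flush, each store holds four files and ExploringCompactionPolicy selects all of them for a minor compaction, collapsing every store back to a single file (30.8 K for A, 12.3 K for B and C). The compactions in this log are queued automatically by MemStoreFlusher, but flushes and compactions can also be requested explicitly through the public Admin API; the sketch below is illustrative, assumes the same TestAcidGuarantees table name, and notes that both requests are asynchronous, so the polling loop may briefly observe NONE before the request is picked up.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionRequestExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.flush(table);         // force memstores to disk, as FlushTableProcedure did above
          admin.majorCompact(table);  // asynchronous request; rewrites all store files per store
          // Poll until the server reports no compaction in progress for the table.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(1000);
          }
        }
      }
    }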
2024-11-27T16:22:59,802 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:22:59,802 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d., storeName=a4d3cff76c8a3133e00d8e6d8859dc7d/C, priority=12, startTime=1732724579271; duration=0sec 2024-11-27T16:22:59,802 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:22:59,802 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a4d3cff76c8a3133e00d8e6d8859dc7d:C 2024-11-27T16:23:00,054 DEBUG [Thread-1697 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3268230a to 127.0.0.1:51088 2024-11-27T16:23:00,054 DEBUG [Thread-1697 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:00,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-27T16:23:00,153 INFO [Thread-1701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-27T16:23:00,153 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-27T16:23:00,153 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 80 2024-11-27T16:23:00,153 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81 2024-11-27T16:23:00,153 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 79 2024-11-27T16:23:00,153 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 14 2024-11-27T16:23:00,153 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74 2024-11-27T16:23:00,153 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-27T16:23:00,154 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-27T16:23:00,154 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1396 2024-11-27T16:23:00,154 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4187 rows 2024-11-27T16:23:00,154 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1434 2024-11-27T16:23:00,154 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4302 rows 2024-11-27T16:23:00,154 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1420 2024-11-27T16:23:00,154 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4259 rows 2024-11-27T16:23:00,154 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1387 2024-11-27T16:23:00,154 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4160 rows 2024-11-27T16:23:00,154 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1400 2024-11-27T16:23:00,154 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4200 rows 2024-11-27T16:23:00,154 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-27T16:23:00,154 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2ecf33fc to 127.0.0.1:51088 
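The summary above is AcidGuaranteesTestTool reporting its run: five writer threads issued row mutations (wrote 80, 81, 79, 14, 74) while the scanner threads report how many scans they ran and how many rows they verified, where "verified" means the values seen within a single row were mutually consistent across families A, B and C, exercising HBase's row-level atomicity. The sketch below is a rough illustration of that per-row check, not the tool's actual code, and it assumes each writer stamps the same value into every column of a row in one Put.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowAtomicityCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"));
             ResultScanner scanner = table.getScanner(new Scan())) {
          for (Result row : scanner) {
            byte[] expected = null;
            for (Cell cell : row.rawCells()) {
              byte[] value = CellUtil.cloneValue(cell);
              if (expected == null) {
                expected = value;  // first cell fixes the expected value for this row
              } else if (!Bytes.equals(expected, value)) {
                throw new AssertionError("Torn row " + Bytes.toString(row.getRow())
                    + ": a writer's update was only partially visible");
              }
            }
          }
        }
      }
    }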
2024-11-27T16:23:00,154 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:00,158 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-27T16:23:00,159 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-27T16:23:00,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:00,167 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724580166"}]},"ts":"1732724580166"} 2024-11-27T16:23:00,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-27T16:23:00,168 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-27T16:23:00,171 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-27T16:23:00,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T16:23:00,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a4d3cff76c8a3133e00d8e6d8859dc7d, UNASSIGN}] 2024-11-27T16:23:00,175 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a4d3cff76c8a3133e00d8e6d8859dc7d, UNASSIGN 2024-11-27T16:23:00,175 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=118 updating hbase:meta row=a4d3cff76c8a3133e00d8e6d8859dc7d, regionState=CLOSING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:23:00,177 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T16:23:00,178 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; CloseRegionProcedure a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:23:00,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-27T16:23:00,329 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:00,330 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,330 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T16:23:00,330 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): 
Closing a4d3cff76c8a3133e00d8e6d8859dc7d, disabling compactions & flushes 2024-11-27T16:23:00,330 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:23:00,330 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:23:00,330 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. after waiting 0 ms 2024-11-27T16:23:00,330 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:23:00,330 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing a4d3cff76c8a3133e00d8e6d8859dc7d 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-27T16:23:00,330 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=A 2024-11-27T16:23:00,330 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:00,330 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=B 2024-11-27T16:23:00,330 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:00,331 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a4d3cff76c8a3133e00d8e6d8859dc7d, store=C 2024-11-27T16:23:00,331 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:00,337 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c206cdb5c855434498c9f3ca190444b4_a4d3cff76c8a3133e00d8e6d8859dc7d is 50, key is test_row_0/A:col10/1732724578415/Put/seqid=0 2024-11-27T16:23:00,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742318_1494 (size=12454) 2024-11-27T16:23:00,341 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:00,346 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 
{event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c206cdb5c855434498c9f3ca190444b4_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c206cdb5c855434498c9f3ca190444b4_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,347 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/1fa77731330248f28a7cd37e744ab0b2, store: [table=TestAcidGuarantees family=A region=a4d3cff76c8a3133e00d8e6d8859dc7d] 2024-11-27T16:23:00,347 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/1fa77731330248f28a7cd37e744ab0b2 is 175, key is test_row_0/A:col10/1732724578415/Put/seqid=0 2024-11-27T16:23:00,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742319_1495 (size=31255) 2024-11-27T16:23:00,366 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=422, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/1fa77731330248f28a7cd37e744ab0b2 2024-11-27T16:23:00,373 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/5501ecb8f67e4fe494c5a5a1ac89b57d is 50, key is test_row_0/B:col10/1732724578415/Put/seqid=0 2024-11-27T16:23:00,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742320_1496 (size=12301) 2024-11-27T16:23:00,378 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/5501ecb8f67e4fe494c5a5a1ac89b57d 2024-11-27T16:23:00,393 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/550983e772714648836635716535b016 is 50, key is test_row_0/C:col10/1732724578415/Put/seqid=0 2024-11-27T16:23:00,397 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742321_1497 (size=12301) 2024-11-27T16:23:00,398 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/550983e772714648836635716535b016 2024-11-27T16:23:00,404 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/A/1fa77731330248f28a7cd37e744ab0b2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1fa77731330248f28a7cd37e744ab0b2 2024-11-27T16:23:00,408 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1fa77731330248f28a7cd37e744ab0b2, entries=150, sequenceid=422, filesize=30.5 K 2024-11-27T16:23:00,409 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/B/5501ecb8f67e4fe494c5a5a1ac89b57d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/5501ecb8f67e4fe494c5a5a1ac89b57d 2024-11-27T16:23:00,412 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/5501ecb8f67e4fe494c5a5a1ac89b57d, entries=150, sequenceid=422, filesize=12.0 K 2024-11-27T16:23:00,414 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/.tmp/C/550983e772714648836635716535b016 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/550983e772714648836635716535b016 2024-11-27T16:23:00,418 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/550983e772714648836635716535b016, entries=150, sequenceid=422, filesize=12.0 K 2024-11-27T16:23:00,419 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 
KB/72720, currentSize=0 B/0 for a4d3cff76c8a3133e00d8e6d8859dc7d in 89ms, sequenceid=422, compaction requested=false 2024-11-27T16:23:00,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/40a8db297abe4a6588177986463477bc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/2710ac3c931249fabb04794ec1ba9505, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/59389b101bf04000b7c674048c9caf6b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/f1fef015f4914f5a88d84ac03e657367, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/ecf1a8ae169b421bb02e6651b0f51bd9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/3a8a0a3495a1426da607443c0f25a194, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/d35a463f34484e09896357002db8e1b6, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/fac63ed55191468c87f659811aac70ad] to archive 2024-11-27T16:23:00,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
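The flush above is the close-time flush of region a4d3cff76c8a3133e00d8e6d8859dc7d: each column family memstore is written to a .tmp store file and then committed into the A, B and C family directories (entries=150, sequenceid=422), after which the StoreCloser starts archiving the previously compacted store files seen in the following entries. For reference only, a flush of the same table can also be requested from a client through the Admin API. The sketch below is an illustration, not part of the test; it assumes an hbase-site.xml on the classpath that points at the cluster, and the class name FlushExample is invented here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
        public static void main(String[] args) throws Exception {
            // Assumes hbase-site.xml on the classpath describes the target cluster.
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask every region of the table to flush its memstores; the region
                // server writes .tmp store files and commits them, as in the log above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }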
2024-11-27T16:23:00,422 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/40a8db297abe4a6588177986463477bc to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/40a8db297abe4a6588177986463477bc 2024-11-27T16:23:00,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/2710ac3c931249fabb04794ec1ba9505 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/2710ac3c931249fabb04794ec1ba9505 2024-11-27T16:23:00,424 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/59389b101bf04000b7c674048c9caf6b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/59389b101bf04000b7c674048c9caf6b 2024-11-27T16:23:00,426 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/f1fef015f4914f5a88d84ac03e657367 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/f1fef015f4914f5a88d84ac03e657367 2024-11-27T16:23:00,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/ecf1a8ae169b421bb02e6651b0f51bd9 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/ecf1a8ae169b421bb02e6651b0f51bd9 2024-11-27T16:23:00,428 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/3a8a0a3495a1426da607443c0f25a194 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/3a8a0a3495a1426da607443c0f25a194 2024-11-27T16:23:00,429 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/d35a463f34484e09896357002db8e1b6 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/d35a463f34484e09896357002db8e1b6 2024-11-27T16:23:00,430 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/fac63ed55191468c87f659811aac70ad to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/fac63ed55191468c87f659811aac70ad 2024-11-27T16:23:00,431 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d4407a7c02dc40a6802d31eeedde5e0f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d1ac218aa08245d2933a752f83221c85, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/1bcd2d5a50284b05a76021f53188612a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/86f8b88263504085a35f86fb366003bc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b1ece5272a7e4d85b23ac53c86ffa2f9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/32ac565702934721bcd7cdda3a6e767b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/62f7414e88b8434eb9a36f11b7e9d81c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/979e63a3f300426c8066a176c3afa73a] to archive 2024-11-27T16:23:00,432 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-27T16:23:00,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d4407a7c02dc40a6802d31eeedde5e0f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d4407a7c02dc40a6802d31eeedde5e0f 2024-11-27T16:23:00,435 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d1ac218aa08245d2933a752f83221c85 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/d1ac218aa08245d2933a752f83221c85 2024-11-27T16:23:00,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/1bcd2d5a50284b05a76021f53188612a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/1bcd2d5a50284b05a76021f53188612a 2024-11-27T16:23:00,438 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/86f8b88263504085a35f86fb366003bc to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/86f8b88263504085a35f86fb366003bc 2024-11-27T16:23:00,444 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b1ece5272a7e4d85b23ac53c86ffa2f9 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/b1ece5272a7e4d85b23ac53c86ffa2f9 2024-11-27T16:23:00,445 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/32ac565702934721bcd7cdda3a6e767b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/32ac565702934721bcd7cdda3a6e767b 2024-11-27T16:23:00,448 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/62f7414e88b8434eb9a36f11b7e9d81c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/62f7414e88b8434eb9a36f11b7e9d81c 2024-11-27T16:23:00,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/979e63a3f300426c8066a176c3afa73a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/979e63a3f300426c8066a176c3afa73a 2024-11-27T16:23:00,452 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/87fa51a57beb4b9191ae4c298b59a57c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0b0f84fecd5845e49222594188f0d029, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/b5fc1186d0844b8e8617a2d4449fc857, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/391ed64a451e4fcca13615a70a8ecffe, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/8836a870a28a48509a8bb49ba4c36ecb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4dff3b3954ff4e36b8eda5163987f82a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/72b6968638cb4e0084e0a0ff3889d022, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4cce7e431f0449179298453830e5450e] to archive 2024-11-27T16:23:00,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
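The compacted store files of families A and B have now been moved under the archive directory rather than deleted outright, and the entries that follow do the same for family C. While the test's HDFS mini-cluster (localhost:34065) is still running, the archived files can be inspected with the plain Hadoop FileSystem API; the sketch below is only an illustration, the path is copied from the log above, and the class name ListArchivedStoreFiles is invented here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedStoreFiles {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Archive directory for family B of the closed region, taken from the log;
            // it only exists while the test data directory is still on the mini-cluster.
            Path archived = new Path("hdfs://localhost:34065/user/jenkins/test-data/"
                    + "d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/"
                    + "TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B");
            try (FileSystem fs = archived.getFileSystem(conf)) {
                for (FileStatus status : fs.listStatus(archived)) {
                    // Prints each archived HFile with its length in bytes.
                    System.out.println(status.getPath() + "\t" + status.getLen());
                }
            }
        }
    }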
2024-11-27T16:23:00,455 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/87fa51a57beb4b9191ae4c298b59a57c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/87fa51a57beb4b9191ae4c298b59a57c 2024-11-27T16:23:00,457 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0b0f84fecd5845e49222594188f0d029 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/0b0f84fecd5845e49222594188f0d029 2024-11-27T16:23:00,458 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/b5fc1186d0844b8e8617a2d4449fc857 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/b5fc1186d0844b8e8617a2d4449fc857 2024-11-27T16:23:00,459 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/391ed64a451e4fcca13615a70a8ecffe to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/391ed64a451e4fcca13615a70a8ecffe 2024-11-27T16:23:00,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/8836a870a28a48509a8bb49ba4c36ecb to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/8836a870a28a48509a8bb49ba4c36ecb 2024-11-27T16:23:00,463 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4dff3b3954ff4e36b8eda5163987f82a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4dff3b3954ff4e36b8eda5163987f82a 2024-11-27T16:23:00,465 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/72b6968638cb4e0084e0a0ff3889d022 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/72b6968638cb4e0084e0a0ff3889d022 2024-11-27T16:23:00,466 DEBUG [StoreCloser-TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4cce7e431f0449179298453830e5450e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/4cce7e431f0449179298453830e5450e 2024-11-27T16:23:00,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-27T16:23:00,472 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/recovered.edits/425.seqid, newMaxSeqId=425, maxSeqId=4 2024-11-27T16:23:00,473 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d. 2024-11-27T16:23:00,473 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for a4d3cff76c8a3133e00d8e6d8859dc7d: 2024-11-27T16:23:00,475 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,475 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=118 updating hbase:meta row=a4d3cff76c8a3133e00d8e6d8859dc7d, regionState=CLOSED 2024-11-27T16:23:00,478 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-27T16:23:00,478 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; CloseRegionProcedure a4d3cff76c8a3133e00d8e6d8859dc7d, server=7b191dec6496,44169,1732724452967 in 299 msec 2024-11-27T16:23:00,480 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=117 2024-11-27T16:23:00,480 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=117, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a4d3cff76c8a3133e00d8e6d8859dc7d, UNASSIGN in 304 msec 2024-11-27T16:23:00,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-27T16:23:00,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 308 msec 2024-11-27T16:23:00,482 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724580482"}]},"ts":"1732724580482"} 
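With the close and unassign subprocedures (pids 119, 118, 117) finished, the parent DisableTableProcedure (pid=116) is marking TestAcidGuarantees DISABLED in hbase:meta, and the entries that follow show the client deleting the table, which archives the remaining region and MOB directories. A minimal client-side equivalent of that disable-then-delete sequence, shown only as a sketch (configured connection assumed, class name DropTableExample invented), is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName tn = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                if (admin.tableExists(tn)) {
                    // disableTable waits for the server-side DisableTableProcedure;
                    // deleteTable then removes the table and archives its region files.
                    admin.disableTable(tn);
                    admin.deleteTable(tn);
                }
            }
        }
    }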
2024-11-27T16:23:00,484 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-27T16:23:00,486 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-27T16:23:00,488 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 328 msec 2024-11-27T16:23:00,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-27T16:23:00,770 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-27T16:23:00,771 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-27T16:23:00,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:00,772 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=120, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:00,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-27T16:23:00,773 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=120, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:00,774 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,777 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/recovered.edits] 2024-11-27T16:23:00,779 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1fa77731330248f28a7cd37e744ab0b2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/1fa77731330248f28a7cd37e744ab0b2 2024-11-27T16:23:00,781 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b4e47bd72aad49daa12027e71c60f4e9 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/A/b4e47bd72aad49daa12027e71c60f4e9 2024-11-27T16:23:00,784 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/5501ecb8f67e4fe494c5a5a1ac89b57d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/5501ecb8f67e4fe494c5a5a1ac89b57d 2024-11-27T16:23:00,785 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/7bf723349b6645aaa13688a7432ffba5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/B/7bf723349b6645aaa13688a7432ffba5 2024-11-27T16:23:00,788 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/550983e772714648836635716535b016 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/550983e772714648836635716535b016 2024-11-27T16:23:00,789 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/c296df847ffb411e8cbddb944a20c0e0 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/C/c296df847ffb411e8cbddb944a20c0e0 2024-11-27T16:23:00,791 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/recovered.edits/425.seqid to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d/recovered.edits/425.seqid 2024-11-27T16:23:00,792 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,792 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-27T16:23:00,792 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-27T16:23:00,793 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-27T16:23:00,796 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112702f5f881dba84dec8357c2390047b1a9_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112702f5f881dba84dec8357c2390047b1a9_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,797 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270371e0fc015a423e8aeb15327803cef2_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270371e0fc015a423e8aeb15327803cef2_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,798 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127114af0ec9c10455387e601166c4de7d1_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127114af0ec9c10455387e601166c4de7d1_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,800 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112713f5a0ccaf064f819b13894b03c776ef_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112713f5a0ccaf064f819b13894b03c776ef_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,801 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411271b33f643cfdc4e80a5e6fac9e25388d0_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411271b33f643cfdc4e80a5e6fac9e25388d0_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,802 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112734f60f669947459eba573484fde8355b_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112734f60f669947459eba573484fde8355b_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,803 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411274ef612bfdbb34a619ad30db24391d1ec_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411274ef612bfdbb34a619ad30db24391d1ec_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,805 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411274ff8ccd743f445e8aa8dc082c44e4bfe_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411274ff8ccd743f445e8aa8dc082c44e4bfe_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,806 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275e0c6dcb48a942cb9df26b37d3172753_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275e0c6dcb48a942cb9df26b37d3172753_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,807 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112769a2dbe4fb4043888551388153589026_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112769a2dbe4fb4043888551388153589026_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,809 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411276c8e5454fc0842f491cd95453cba8148_a4d3cff76c8a3133e00d8e6d8859dc7d to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411276c8e5454fc0842f491cd95453cba8148_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,810 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127720c9902615a46519c5f15ba96c91b69_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127720c9902615a46519c5f15ba96c91b69_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,811 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278328906cfffc4e9f8559aefbb3432342_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278328906cfffc4e9f8559aefbb3432342_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,813 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278a60e1ee2a474c60a61457d4d12bc989_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278a60e1ee2a474c60a61457d4d12bc989_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,814 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279340f99b22dc455aa00d3015e353a328_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279340f99b22dc455aa00d3015e353a328_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,816 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279e65c61160fd4387bcc98b3f8e9b8cdf_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279e65c61160fd4387bcc98b3f8e9b8cdf_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,817 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a2ed29ca450343e0af994d14fd495e0f_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a2ed29ca450343e0af994d14fd495e0f_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,819 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127afd01eed83f845d682763630620fe0a3_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127afd01eed83f845d682763630620fe0a3_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,820 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b96b45bb62b641548e1c1a208e0631e7_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b96b45bb62b641548e1c1a208e0631e7_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,821 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c206cdb5c855434498c9f3ca190444b4_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c206cdb5c855434498c9f3ca190444b4_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,823 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c62b386e751d4cc6971acb69335509a5_a4d3cff76c8a3133e00d8e6d8859dc7d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c62b386e751d4cc6971acb69335509a5_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,824 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127f305dadb7b1b4165a72b644a316c793d_a4d3cff76c8a3133e00d8e6d8859dc7d to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127f305dadb7b1b4165a72b644a316c793d_a4d3cff76c8a3133e00d8e6d8859dc7d 2024-11-27T16:23:00,825 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-27T16:23:00,827 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=120, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:00,828 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-27T16:23:00,830 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-27T16:23:00,831 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=120, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:00,831 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-27T16:23:00,832 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732724580831"}]},"ts":"9223372036854775807"} 2024-11-27T16:23:00,834 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-27T16:23:00,834 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => a4d3cff76c8a3133e00d8e6d8859dc7d, NAME => 'TestAcidGuarantees,,1732724555193.a4d3cff76c8a3133e00d8e6d8859dc7d.', STARTKEY => '', ENDKEY => ''}] 2024-11-27T16:23:00,834 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-27T16:23:00,834 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732724580834"}]},"ts":"9223372036854775807"} 2024-11-27T16:23:00,835 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-27T16:23:00,837 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=120, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:00,838 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 67 msec 2024-11-27T16:23:00,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-27T16:23:00,874 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-27T16:23:00,888 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=239 (was 237) - Thread LEAK? -, OpenFileDescriptor=451 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=786 (was 650) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3846 (was 4013) 2024-11-27T16:23:00,902 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=239, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=786, ProcessCount=11, AvailableMemoryMB=3846 2024-11-27T16:23:00,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-27T16:23:00,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T16:23:00,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=121, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:00,907 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T16:23:00,907 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:00,907 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 121 2024-11-27T16:23:00,908 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T16:23:00,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-11-27T16:23:00,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742322_1498 (size=960) 2024-11-27T16:23:00,930 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => 
'', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59 2024-11-27T16:23:00,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742323_1499 (size=53) 2024-11-27T16:23:00,980 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:23:00,980 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 00f170535dc2739662302d98f22dc172, disabling compactions & flushes 2024-11-27T16:23:00,980 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:00,980 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:00,980 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. after waiting 0 ms 2024-11-27T16:23:00,980 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:00,980 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
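The descriptor logged above for the new table (BASIC compacting memstore, families A, B and C with VERSIONS => '1') can be reproduced from a client with the TableDescriptorBuilder and ColumnFamilyDescriptorBuilder API. The sketch below is not the code the test runs; it only sets the attributes the log calls out explicitly, leaves every other option at its default, assumes a configured connection, and uses the invented class name CreateAcidTableExample.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableDescriptorBuilder table = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // Table-level metadata from the log: BASIC in-memory compaction,
                    // i.e. the CompactingMemStore seen when the region opens below.
                    .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
            for (String family : new String[] {"A", "B", "C"}) {
                table.setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                        .build());
            }
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // createTable is synchronous; server side it runs a CreateTableProcedure
                // comparable to pid=121 in the log above.
                admin.createTable(table.build());
            }
        }
    }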
2024-11-27T16:23:00,980 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:00,981 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T16:23:00,981 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732724580981"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732724580981"}]},"ts":"1732724580981"} 2024-11-27T16:23:00,983 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-27T16:23:00,983 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T16:23:00,984 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724580983"}]},"ts":"1732724580983"} 2024-11-27T16:23:00,985 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-27T16:23:00,990 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=00f170535dc2739662302d98f22dc172, ASSIGN}] 2024-11-27T16:23:00,992 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=00f170535dc2739662302d98f22dc172, ASSIGN 2024-11-27T16:23:00,995 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=00f170535dc2739662302d98f22dc172, ASSIGN; state=OFFLINE, location=7b191dec6496,44169,1732724452967; forceNewPlan=false, retain=false 2024-11-27T16:23:01,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-11-27T16:23:01,146 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=00f170535dc2739662302d98f22dc172, regionState=OPENING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,147 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; OpenRegionProcedure 00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:23:01,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-11-27T16:23:01,299 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,302 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] handler.AssignRegionHandler(135): Open 
TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:01,302 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7285): Opening region: {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} 2024-11-27T16:23:01,302 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:01,302 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:23:01,302 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7327): checking encryption for 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:01,302 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7330): checking classloading for 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:01,304 INFO [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:01,305 INFO [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:23:01,305 INFO [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 00f170535dc2739662302d98f22dc172 columnFamilyName A 2024-11-27T16:23:01,305 DEBUG [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:01,306 INFO [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] regionserver.HStore(327): Store=00f170535dc2739662302d98f22dc172/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:23:01,306 INFO [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:01,307 INFO [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:23:01,307 INFO [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 00f170535dc2739662302d98f22dc172 columnFamilyName B 2024-11-27T16:23:01,307 DEBUG [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:01,308 INFO [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] regionserver.HStore(327): Store=00f170535dc2739662302d98f22dc172/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:23:01,308 INFO [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:01,309 INFO [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:23:01,309 INFO [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 00f170535dc2739662302d98f22dc172 columnFamilyName C 2024-11-27T16:23:01,309 DEBUG [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:01,310 INFO [StoreOpener-00f170535dc2739662302d98f22dc172-1 {}] regionserver.HStore(327): 
Store=00f170535dc2739662302d98f22dc172/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:23:01,310 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:01,310 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172 2024-11-27T16:23:01,311 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172 2024-11-27T16:23:01,312 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T16:23:01,313 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1085): writing seq id for 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:01,315 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T16:23:01,316 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1102): Opened 00f170535dc2739662302d98f22dc172; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71227126, jitterRate=0.061366885900497437}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T16:23:01,317 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1001): Region open journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:01,317 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., pid=123, masterSystemTime=1732724581299 2024-11-27T16:23:01,319 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:01,319 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
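With the region open, the entries that follow show client connections being set up and concurrent writers hitting the region; the row key test_row_0 and qualifier col10 in families A, B and C appear further down in the flush and HFile messages. A single multi-family put of that shape could be issued roughly as below. The payload size and names are placeholders, not the test's actual write path.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriterSketch {                    // illustrative, not the test's writer thread
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          byte[] value = new byte[32];             // placeholder payload
          Put put = new Put(Bytes.toBytes("test_row_0"));
          for (String family : new String[] {"A", "B", "C"}) {
            // one cell per family, mirroring the A/B/C column families opened above
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
          }
          table.put(put);                          // lands in the CompactingMemStore until flushed
        }
      }
    }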
2024-11-27T16:23:01,319 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=00f170535dc2739662302d98f22dc172, regionState=OPEN, openSeqNum=2, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,322 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-27T16:23:01,322 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; OpenRegionProcedure 00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 in 173 msec 2024-11-27T16:23:01,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=122, resume processing ppid=121 2024-11-27T16:23:01,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, ppid=121, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=00f170535dc2739662302d98f22dc172, ASSIGN in 333 msec 2024-11-27T16:23:01,324 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T16:23:01,324 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724581324"}]},"ts":"1732724581324"} 2024-11-27T16:23:01,325 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-27T16:23:01,328 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T16:23:01,329 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 424 msec 2024-11-27T16:23:01,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-11-27T16:23:01,512 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 121 completed 2024-11-27T16:23:01,513 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4fbee617 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4efaf022 2024-11-27T16:23:01,517 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65036559, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:01,518 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:01,519 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38700, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:01,520 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T16:23:01,521 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44428, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T16:23:01,523 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46c37647 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f21f55d 2024-11-27T16:23:01,526 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21f67a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:01,526 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2fb24d40 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f69def6 2024-11-27T16:23:01,529 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d5fe744, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:01,530 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51453050 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@60eadae0 2024-11-27T16:23:01,533 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@721d647e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:01,533 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x632d1806 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@55a6e359 2024-11-27T16:23:01,536 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c014307, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:01,537 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4f99adfe to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d47237f 2024-11-27T16:23:01,543 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b9854ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:01,544 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e690d6 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6b72a92d 2024-11-27T16:23:01,551 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a4d4e08, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:01,551 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3abeec20 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44fb119b 2024-11-27T16:23:01,554 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44462a02, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:01,555 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x00df2701 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c349948 2024-11-27T16:23:01,558 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69d7a6f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:01,558 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x35ca71a1 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d0c5089 2024-11-27T16:23:01,561 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5938a7c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:01,562 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x56a4483a to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3943c27f 2024-11-27T16:23:01,564 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25593478, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:01,567 DEBUG [hconnection-0x51850923-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:01,568 DEBUG [hconnection-0x7649f71a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:01,568 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38716, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:01,569 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38720, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:01,569 DEBUG [hconnection-0x7d075359-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:01,569 DEBUG 
[hconnection-0x771a315a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:01,570 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:01,570 DEBUG [hconnection-0x39702952-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:01,570 DEBUG [hconnection-0x57d250c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:01,570 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38730, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:01,571 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38742, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:01,571 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38740, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:01,571 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38754, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:01,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-11-27T16:23:01,572 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:01,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-27T16:23:01,572 DEBUG [hconnection-0x1dfbb65d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:01,573 DEBUG [hconnection-0x1dd9b4f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:01,573 DEBUG [hconnection-0x744b029f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:01,573 DEBUG [hconnection-0x6f15ee40-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:01,573 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:01,573 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38758, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:01,573 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:01,574 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38764, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:01,574 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38770, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:01,574 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38772, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:01,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:01,577 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:23:01,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:01,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:01,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:01,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:01,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:01,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:01,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724641591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724641592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724641593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724641595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724641599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/e8a26740741f4b849f0fe7496b05942f is 50, key is test_row_0/A:col10/1732724581577/Put/seqid=0 2024-11-27T16:23:01,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742324_1500 (size=14341) 2024-11-27T16:23:01,627 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/e8a26740741f4b849f0fe7496b05942f 2024-11-27T16:23:01,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/b2bc52a29d364b23a2cdb82ec5857967 is 50, key is test_row_0/B:col10/1732724581577/Put/seqid=0 2024-11-27T16:23:01,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742325_1501 (size=12001) 2024-11-27T16:23:01,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-27T16:23:01,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/b2bc52a29d364b23a2cdb82ec5857967 2024-11-27T16:23:01,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724641696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724641696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724641696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724641698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724641701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/bc09cf13db674d4999d215696716ec65 is 50, key is test_row_0/C:col10/1732724581577/Put/seqid=0 2024-11-27T16:23:01,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742326_1502 (size=12001) 2024-11-27T16:23:01,729 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-27T16:23:01,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:01,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:01,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:01,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:01,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:01,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:01,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-27T16:23:01,881 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-27T16:23:01,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:01,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:01,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:01,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
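The repeated RegionTooBusyException entries above and below come from HRegion.checkResources rejecting puts while the region is over its memstore blocking limit (512.0 K here) and the flush is still running; the "Unable to complete flush ... as already flushing" failures are the master's FlushRegionProcedure racing with that same in-progress flush and being retried. The stock client normally absorbs these retries itself, but a caller doing its own puts could back off along the lines of the sketch below; the retry count and sleep values are illustrative, and depending on client retry settings the exception may surface wrapped rather than directly.

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class BackoffPutSketch {
      // Illustrative helper, not taken from the test source: retries a put a few times
      // when the region reports it is over its memstore limit, sleeping between attempts.
      static void putWithBackoff(Table table, Put put) throws Exception {
        int attempts = 0;
        while (true) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {   // the exception type seen in this log
            if (++attempts >= 5) {
              throw e;                           // give up after a bounded number of retries
            }
            Thread.sleep(100L * attempts);       // simple linear backoff; values are placeholders
          }
        }
      }
    }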
2024-11-27T16:23:01,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:01,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:01,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724641897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724641904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724641904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724641904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:01,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:01,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724641905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,034 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-27T16:23:02,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:02,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:02,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:02,035 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:02,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:02,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:02,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/bc09cf13db674d4999d215696716ec65 2024-11-27T16:23:02,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/e8a26740741f4b849f0fe7496b05942f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e8a26740741f4b849f0fe7496b05942f 2024-11-27T16:23:02,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e8a26740741f4b849f0fe7496b05942f, entries=200, sequenceid=14, filesize=14.0 K 2024-11-27T16:23:02,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/b2bc52a29d364b23a2cdb82ec5857967 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/b2bc52a29d364b23a2cdb82ec5857967 2024-11-27T16:23:02,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/b2bc52a29d364b23a2cdb82ec5857967, entries=150, sequenceid=14, filesize=11.7 K 2024-11-27T16:23:02,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/bc09cf13db674d4999d215696716ec65 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bc09cf13db674d4999d215696716ec65 2024-11-27T16:23:02,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bc09cf13db674d4999d215696716ec65, entries=150, sequenceid=14, filesize=11.7 K 2024-11-27T16:23:02,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 00f170535dc2739662302d98f22dc172 in 555ms, sequenceid=14, compaction requested=false 2024-11-27T16:23:02,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:02,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=124 2024-11-27T16:23:02,187 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,187 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-27T16:23:02,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:02,187 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T16:23:02,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:02,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:02,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:02,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:02,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:02,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:02,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/d6c00ceab2d04782bf4824d90bbcf4f3 is 50, key is test_row_0/A:col10/1732724581592/Put/seqid=0 2024-11-27T16:23:02,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742327_1503 (size=12001) 2024-11-27T16:23:02,197 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/d6c00ceab2d04782bf4824d90bbcf4f3 2024-11-27T16:23:02,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/d568b3410e7945e9893fa535cf1fef55 is 50, key is test_row_0/B:col10/1732724581592/Put/seqid=0 2024-11-27T16:23:02,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:02,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:02,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742328_1504 (size=12001) 2024-11-27T16:23:02,212 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/d568b3410e7945e9893fa535cf1fef55 2024-11-27T16:23:02,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/9b5e200e5cca4031ac8f0b1cf6b9ef83 is 50, key is test_row_0/C:col10/1732724581592/Put/seqid=0 2024-11-27T16:23:02,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742329_1505 (size=12001) 2024-11-27T16:23:02,226 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/9b5e200e5cca4031ac8f0b1cf6b9ef83 2024-11-27T16:23:02,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724642219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724642219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724642220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/d6c00ceab2d04782bf4824d90bbcf4f3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d6c00ceab2d04782bf4824d90bbcf4f3 2024-11-27T16:23:02,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724642224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724642225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,233 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d6c00ceab2d04782bf4824d90bbcf4f3, entries=150, sequenceid=37, filesize=11.7 K 2024-11-27T16:23:02,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/d568b3410e7945e9893fa535cf1fef55 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d568b3410e7945e9893fa535cf1fef55 2024-11-27T16:23:02,237 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d568b3410e7945e9893fa535cf1fef55, entries=150, sequenceid=37, filesize=11.7 K 2024-11-27T16:23:02,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/9b5e200e5cca4031ac8f0b1cf6b9ef83 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/9b5e200e5cca4031ac8f0b1cf6b9ef83 2024-11-27T16:23:02,242 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/9b5e200e5cca4031ac8f0b1cf6b9ef83, entries=150, sequenceid=37, filesize=11.7 K 2024-11-27T16:23:02,243 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 00f170535dc2739662302d98f22dc172 in 56ms, sequenceid=37, 
compaction requested=false 2024-11-27T16:23:02,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:02,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:02,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-11-27T16:23:02,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-11-27T16:23:02,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-27T16:23:02,247 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 672 msec 2024-11-27T16:23:02,248 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 677 msec 2024-11-27T16:23:02,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:02,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-27T16:23:02,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:02,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:02,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:02,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:02,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:02,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:02,340 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/c554ac2f54624062a5216cdb33789176 is 50, key is test_row_0/A:col10/1732724582216/Put/seqid=0 2024-11-27T16:23:02,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742330_1506 (size=12001) 2024-11-27T16:23:02,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/c554ac2f54624062a5216cdb33789176 2024-11-27T16:23:02,366 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/4b84932384d84719b57dc8094bd675af is 50, key is test_row_0/B:col10/1732724582216/Put/seqid=0 2024-11-27T16:23:02,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724642366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724642369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724642370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724642371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724642374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742331_1507 (size=12001) 2024-11-27T16:23:02,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/4b84932384d84719b57dc8094bd675af 2024-11-27T16:23:02,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/73e7961fa2094c2286e3b553d2a4fd72 is 50, key is test_row_0/C:col10/1732724582216/Put/seqid=0 2024-11-27T16:23:02,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742332_1508 (size=12001) 2024-11-27T16:23:02,417 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/73e7961fa2094c2286e3b553d2a4fd72 2024-11-27T16:23:02,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/c554ac2f54624062a5216cdb33789176 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c554ac2f54624062a5216cdb33789176 2024-11-27T16:23:02,425 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c554ac2f54624062a5216cdb33789176, entries=150, sequenceid=52, filesize=11.7 K 2024-11-27T16:23:02,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/4b84932384d84719b57dc8094bd675af as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/4b84932384d84719b57dc8094bd675af 2024-11-27T16:23:02,429 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/4b84932384d84719b57dc8094bd675af, entries=150, sequenceid=52, filesize=11.7 K 2024-11-27T16:23:02,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/73e7961fa2094c2286e3b553d2a4fd72 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/73e7961fa2094c2286e3b553d2a4fd72 2024-11-27T16:23:02,433 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/73e7961fa2094c2286e3b553d2a4fd72, entries=150, sequenceid=52, filesize=11.7 K 2024-11-27T16:23:02,435 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 00f170535dc2739662302d98f22dc172 in 100ms, sequenceid=52, compaction requested=true 2024-11-27T16:23:02,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:02,435 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:02,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:02,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:02,435 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:02,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:02,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:02,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:02,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:02,436 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:02,436 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction (all files) 2024-11-27T16:23:02,436 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:02,436 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/b2bc52a29d364b23a2cdb82ec5857967, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d568b3410e7945e9893fa535cf1fef55, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/4b84932384d84719b57dc8094bd675af] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=35.2 K 2024-11-27T16:23:02,437 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:02,437 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files) 2024-11-27T16:23:02,437 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:02,437 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e8a26740741f4b849f0fe7496b05942f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d6c00ceab2d04782bf4824d90bbcf4f3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c554ac2f54624062a5216cdb33789176] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=37.4 K 2024-11-27T16:23:02,437 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b2bc52a29d364b23a2cdb82ec5857967, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732724581575 2024-11-27T16:23:02,437 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8a26740741f4b849f0fe7496b05942f, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732724581575 2024-11-27T16:23:02,437 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d568b3410e7945e9893fa535cf1fef55, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732724581589 2024-11-27T16:23:02,438 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6c00ceab2d04782bf4824d90bbcf4f3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732724581589 2024-11-27T16:23:02,438 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b84932384d84719b57dc8094bd675af, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732724582216 2024-11-27T16:23:02,438 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c554ac2f54624062a5216cdb33789176, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732724582216 2024-11-27T16:23:02,444 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#423 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:02,445 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/6a935771f09f4745af5b04dad37d5037 is 50, key is test_row_0/A:col10/1732724582216/Put/seqid=0 2024-11-27T16:23:02,446 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#424 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:02,446 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/2ef032029d5e43c0ba0d6d0e070f28c4 is 50, key is test_row_0/B:col10/1732724582216/Put/seqid=0 2024-11-27T16:23:02,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742334_1510 (size=12104) 2024-11-27T16:23:02,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742333_1509 (size=12104) 2024-11-27T16:23:02,459 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/2ef032029d5e43c0ba0d6d0e070f28c4 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2ef032029d5e43c0ba0d6d0e070f28c4 2024-11-27T16:23:02,464 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into 2ef032029d5e43c0ba0d6d0e070f28c4(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:02,464 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:02,464 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=13, startTime=1732724582435; duration=0sec 2024-11-27T16:23:02,465 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:02,465 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:02,465 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:02,466 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:02,466 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/C is initiating minor compaction (all files) 2024-11-27T16:23:02,466 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/C in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
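The minor compactions above are system-requested (queued by CompactSplit right after the flush), but the same work can also be asked for explicitly from a client through the Admin API. A minimal sketch, assuming an hbase-site.xml for this cluster is on the classpath; the table name matches the test table in this log, everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompactionSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Ask for a minor compaction of the table's stores; the request is
            // queued on the region servers, much like the "Add compact mark"
            // entries earlier in this log.
            admin.compact(table);
            // A major compaction would rewrite all store files in each store:
            // admin.majorCompact(table);
        }
    }
}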
2024-11-27T16:23:02,466 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bc09cf13db674d4999d215696716ec65, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/9b5e200e5cca4031ac8f0b1cf6b9ef83, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/73e7961fa2094c2286e3b553d2a4fd72] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=35.2 K 2024-11-27T16:23:02,467 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting bc09cf13db674d4999d215696716ec65, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732724581575 2024-11-27T16:23:02,467 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b5e200e5cca4031ac8f0b1cf6b9ef83, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732724581589 2024-11-27T16:23:02,467 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 73e7961fa2094c2286e3b553d2a4fd72, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732724582216 2024-11-27T16:23:02,473 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#C#compaction#425 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:02,473 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/a87e27cf2302473fad06a9e2caabb5c5 is 50, key is test_row_0/C:col10/1732724582216/Put/seqid=0 2024-11-27T16:23:02,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742335_1511 (size=12104) 2024-11-27T16:23:02,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:02,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-27T16:23:02,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:02,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:02,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:02,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:02,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:02,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:02,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/1ef4213d8d6b4d3fb57a08cd37e46dd8 is 50, key is test_row_0/A:col10/1732724582480/Put/seqid=0 2024-11-27T16:23:02,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742336_1512 (size=14341) 2024-11-27T16:23:02,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/1ef4213d8d6b4d3fb57a08cd37e46dd8 2024-11-27T16:23:02,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724642494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724642495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,509 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/aa2fed1c24bc4c0086b4a6e4f571e1cc is 50, key is test_row_0/B:col10/1732724582480/Put/seqid=0 2024-11-27T16:23:02,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724642498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724642500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724642506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742337_1513 (size=12001) 2024-11-27T16:23:02,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/aa2fed1c24bc4c0086b4a6e4f571e1cc 2024-11-27T16:23:02,536 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/cf43fa8566dc4feab1b584de59cd4db9 is 50, key is test_row_0/C:col10/1732724582480/Put/seqid=0 2024-11-27T16:23:02,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742338_1514 (size=12001) 2024-11-27T16:23:02,555 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/cf43fa8566dc4feab1b584de59cd4db9 2024-11-27T16:23:02,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/1ef4213d8d6b4d3fb57a08cd37e46dd8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/1ef4213d8d6b4d3fb57a08cd37e46dd8 2024-11-27T16:23:02,562 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/1ef4213d8d6b4d3fb57a08cd37e46dd8, entries=200, sequenceid=75, filesize=14.0 K 2024-11-27T16:23:02,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/aa2fed1c24bc4c0086b4a6e4f571e1cc as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/aa2fed1c24bc4c0086b4a6e4f571e1cc 2024-11-27T16:23:02,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/aa2fed1c24bc4c0086b4a6e4f571e1cc, entries=150, sequenceid=75, filesize=11.7 K 2024-11-27T16:23:02,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/cf43fa8566dc4feab1b584de59cd4db9 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cf43fa8566dc4feab1b584de59cd4db9 2024-11-27T16:23:02,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cf43fa8566dc4feab1b584de59cd4db9, entries=150, sequenceid=75, filesize=11.7 K 2024-11-27T16:23:02,571 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 00f170535dc2739662302d98f22dc172 in 89ms, sequenceid=75, compaction requested=false 2024-11-27T16:23:02,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:02,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:02,618 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-27T16:23:02,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:02,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:02,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:02,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:02,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:02,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:02,624 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/5b25aac186af46c48d8dbce3fa2f4ab8 is 50, key is test_row_0/A:col10/1732724582503/Put/seqid=0 2024-11-27T16:23:02,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742339_1515 (size=14341) 2024-11-27T16:23:02,656 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724642647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724642648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724642648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724642649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724642655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-27T16:23:02,680 INFO [Thread-2216 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-27T16:23:02,681 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:02,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-11-27T16:23:02,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-27T16:23:02,683 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:02,684 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:02,684 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:02,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724642757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724642758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724642759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724642759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724642759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-27T16:23:02,835 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-27T16:23:02,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:02,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:02,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:02,836 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:02,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:02,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:02,859 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/6a935771f09f4745af5b04dad37d5037 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6a935771f09f4745af5b04dad37d5037 2024-11-27T16:23:02,863 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into 6a935771f09f4745af5b04dad37d5037(size=11.8 K), total size for store is 25.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
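The repeated RegionTooBusyException warnings in this stretch are the region server pushing back while the region's memstores sit over the test's 512.0 K limit, and the remote flush procedure (pid=127) fails with "Unable to complete flush" simply because the region is already flushing. The standard HBase client retries these calls on its own; the sketch below only makes the backoff explicit for illustration. The table, family, row and retry constants are assumptions taken from the log, not part of any real client policy.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Region is over its memstore limit; wait for flushes and
                    // compactions like the ones above to drain it, then retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}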
2024-11-27T16:23:02,863 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:02,863 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=13, startTime=1732724582435; duration=0sec 2024-11-27T16:23:02,863 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:02,863 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:02,883 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/a87e27cf2302473fad06a9e2caabb5c5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a87e27cf2302473fad06a9e2caabb5c5 2024-11-27T16:23:02,886 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/C of 00f170535dc2739662302d98f22dc172 into a87e27cf2302473fad06a9e2caabb5c5(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:02,886 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:02,886 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/C, priority=13, startTime=1732724582435; duration=0sec 2024-11-27T16:23:02,887 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:02,887 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:02,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724642966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724642966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724642966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724642967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:02,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724642967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-27T16:23:02,988 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:02,989 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-27T16:23:02,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:02,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:02,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:02,989 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:02,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:02,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:03,031 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/5b25aac186af46c48d8dbce3fa2f4ab8 2024-11-27T16:23:03,036 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/dacb4904cd2b4d3b9fbebe24e24bc28d is 50, key is test_row_0/B:col10/1732724582503/Put/seqid=0 2024-11-27T16:23:03,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742340_1516 (size=12001) 2024-11-27T16:23:03,044 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/dacb4904cd2b4d3b9fbebe24e24bc28d 2024-11-27T16:23:03,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/4be17d9b935745c893c0d2615defaba3 is 50, key is test_row_0/C:col10/1732724582503/Put/seqid=0 2024-11-27T16:23:03,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742341_1517 (size=12001) 2024-11-27T16:23:03,141 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,142 
DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-27T16:23:03,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:03,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:03,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:03,142 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:03,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:03,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:03,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724643272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724643275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,278 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724643275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724643275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724643275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-27T16:23:03,294 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,295 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-27T16:23:03,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:03,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:03,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:03,295 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:03,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:03,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:03,447 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-27T16:23:03,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:03,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:03,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:03,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:03,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:03,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:03,454 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/4be17d9b935745c893c0d2615defaba3 2024-11-27T16:23:03,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/5b25aac186af46c48d8dbce3fa2f4ab8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/5b25aac186af46c48d8dbce3fa2f4ab8 2024-11-27T16:23:03,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/5b25aac186af46c48d8dbce3fa2f4ab8, entries=200, sequenceid=90, filesize=14.0 K 2024-11-27T16:23:03,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/dacb4904cd2b4d3b9fbebe24e24bc28d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/dacb4904cd2b4d3b9fbebe24e24bc28d 2024-11-27T16:23:03,465 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/dacb4904cd2b4d3b9fbebe24e24bc28d, entries=150, sequenceid=90, 
filesize=11.7 K 2024-11-27T16:23:03,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/4be17d9b935745c893c0d2615defaba3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/4be17d9b935745c893c0d2615defaba3 2024-11-27T16:23:03,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/4be17d9b935745c893c0d2615defaba3, entries=150, sequenceid=90, filesize=11.7 K 2024-11-27T16:23:03,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 00f170535dc2739662302d98f22dc172 in 851ms, sequenceid=90, compaction requested=true 2024-11-27T16:23:03,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:03,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:03,470 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:03,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:03,470 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:03,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:03,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:03,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:03,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:03,470 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40786 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:03,470 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:03,470 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 
00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files) 2024-11-27T16:23:03,470 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction (all files) 2024-11-27T16:23:03,471 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:03,471 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:03,471 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6a935771f09f4745af5b04dad37d5037, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/1ef4213d8d6b4d3fb57a08cd37e46dd8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/5b25aac186af46c48d8dbce3fa2f4ab8] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=39.8 K 2024-11-27T16:23:03,471 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2ef032029d5e43c0ba0d6d0e070f28c4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/aa2fed1c24bc4c0086b4a6e4f571e1cc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/dacb4904cd2b4d3b9fbebe24e24bc28d] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=35.3 K 2024-11-27T16:23:03,471 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a935771f09f4745af5b04dad37d5037, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732724582216 2024-11-27T16:23:03,471 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ef032029d5e43c0ba0d6d0e070f28c4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732724582216 2024-11-27T16:23:03,471 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting aa2fed1c24bc4c0086b4a6e4f571e1cc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732724582369 2024-11-27T16:23:03,471 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ef4213d8d6b4d3fb57a08cd37e46dd8, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732724582361 2024-11-27T16:23:03,471 DEBUG 
[RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting dacb4904cd2b4d3b9fbebe24e24bc28d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732724582493 2024-11-27T16:23:03,472 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b25aac186af46c48d8dbce3fa2f4ab8, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732724582493 2024-11-27T16:23:03,478 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#432 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:03,479 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/202a5835ff2347a094a39485e09d0d05 is 50, key is test_row_0/A:col10/1732724582503/Put/seqid=0 2024-11-27T16:23:03,481 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#433 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:03,481 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/2b9929e0e6984d6e9208ee98a0ceeba3 is 50, key is test_row_0/B:col10/1732724582503/Put/seqid=0 2024-11-27T16:23:03,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742342_1518 (size=12207) 2024-11-27T16:23:03,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742343_1519 (size=12207) 2024-11-27T16:23:03,600 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-27T16:23:03,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:03,600 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-27T16:23:03,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:03,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:03,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:03,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:03,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:03,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:03,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/abd2ccbf670c47f89a02dea2886333a8 is 50, key is test_row_0/A:col10/1732724582646/Put/seqid=0 2024-11-27T16:23:03,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742344_1520 (size=12001) 2024-11-27T16:23:03,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:03,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:03,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-27T16:23:03,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724643793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724643794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724643795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724643798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724643799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,897 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/2b9929e0e6984d6e9208ee98a0ceeba3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2b9929e0e6984d6e9208ee98a0ceeba3 2024-11-27T16:23:03,903 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into 2b9929e0e6984d6e9208ee98a0ceeba3(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
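The repeated RegionTooBusyException warnings above show writes being rejected while the region's memstore is over its blocking limit. As an illustrative sketch only (assuming the exception surfaces to the caller; the HBase client may also retry and wrap it internally), a writer could back off and retry like this. The row, family, and qualifier names are taken from the log keys; everything else is assumed boilerplate:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (RegionTooBusyException e) {
              // Region is blocking updates until a flush drains the memstore; back off.
              Thread.sleep(100L * (attempt + 1));
            }
          }
        }
      }
    }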
2024-11-27T16:23:03,903 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:03,903 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=13, startTime=1732724583470; duration=0sec 2024-11-27T16:23:03,903 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:03,903 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:03,903 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:03,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724643902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,906 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:03,907 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/C is initiating minor compaction (all files) 2024-11-27T16:23:03,907 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/C in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:03,907 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a87e27cf2302473fad06a9e2caabb5c5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cf43fa8566dc4feab1b584de59cd4db9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/4be17d9b935745c893c0d2615defaba3] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=35.3 K 2024-11-27T16:23:03,908 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a87e27cf2302473fad06a9e2caabb5c5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732724582216 2024-11-27T16:23:03,909 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting cf43fa8566dc4feab1b584de59cd4db9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732724582369 2024-11-27T16:23:03,909 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 4be17d9b935745c893c0d2615defaba3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732724582493 2024-11-27T16:23:03,910 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/202a5835ff2347a094a39485e09d0d05 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/202a5835ff2347a094a39485e09d0d05 2024-11-27T16:23:03,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724643904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724643906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724643906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,916 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into 202a5835ff2347a094a39485e09d0d05(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
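The entries above show the region server completing its own minor compactions of the A and B stores and selecting the C store next. For comparison, as an illustrative sketch not taken from this run, compactions can also be requested explicitly through the Admin API; the table and family names come from the log, the rest is assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          // Request a minor compaction of just the C family ...
          admin.compact(tn, Bytes.toBytes("C"));
          // ... or a major compaction of every store in the table.
          admin.majorCompact(tn);
        }
      }
    }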
2024-11-27T16:23:03,916 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:03,916 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=13, startTime=1732724583469; duration=0sec 2024-11-27T16:23:03,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:03,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724643906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:03,916 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:03,916 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:03,930 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#C#compaction#435 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:03,930 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/d3f30e7e760a4225b9c2d3b2599c943e is 50, key is test_row_0/C:col10/1732724582503/Put/seqid=0 2024-11-27T16:23:03,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742345_1521 (size=12207) 2024-11-27T16:23:04,014 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/abd2ccbf670c47f89a02dea2886333a8 2024-11-27T16:23:04,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/0ee44db78c9845d6b1b73872ee90757f is 50, key is test_row_0/B:col10/1732724582646/Put/seqid=0 2024-11-27T16:23:04,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742346_1522 (size=12001) 2024-11-27T16:23:04,034 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/0ee44db78c9845d6b1b73872ee90757f 2024-11-27T16:23:04,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/27a6d4aae8904a31a0f5a5c2c19fafdc is 50, key is test_row_0/C:col10/1732724582646/Put/seqid=0 2024-11-27T16:23:04,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742347_1523 (size=12001) 2024-11-27T16:23:04,048 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/27a6d4aae8904a31a0f5a5c2c19fafdc 2024-11-27T16:23:04,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/abd2ccbf670c47f89a02dea2886333a8 as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/abd2ccbf670c47f89a02dea2886333a8 2024-11-27T16:23:04,055 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/abd2ccbf670c47f89a02dea2886333a8, entries=150, sequenceid=113, filesize=11.7 K 2024-11-27T16:23:04,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/0ee44db78c9845d6b1b73872ee90757f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/0ee44db78c9845d6b1b73872ee90757f 2024-11-27T16:23:04,060 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/0ee44db78c9845d6b1b73872ee90757f, entries=150, sequenceid=113, filesize=11.7 K 2024-11-27T16:23:04,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/27a6d4aae8904a31a0f5a5c2c19fafdc as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/27a6d4aae8904a31a0f5a5c2c19fafdc 2024-11-27T16:23:04,065 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/27a6d4aae8904a31a0f5a5c2c19fafdc, entries=150, sequenceid=113, filesize=11.7 K 2024-11-27T16:23:04,065 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 00f170535dc2739662302d98f22dc172 in 465ms, sequenceid=113, compaction requested=false 2024-11-27T16:23:04,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:04,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
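The flush above drains roughly 120 KB while new writes keep being rejected with "Over memstore limit=512.0 K". That blocking limit is the per-region memstore flush size multiplied by the block multiplier. The sketch below only illustrates that relationship with assumed, test-sized values; the actual configuration used by this test is not shown in the log, and production defaults are far larger:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical test-sized values: blocking threshold = flush size * multiplier.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // 131072 * 4 = 524288 bytes, i.e. the 512 K limit quoted in the warnings above.
        System.out.println("writes block once a region's memstore exceeds "
            + blockingLimit + " bytes");
      }
    }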
2024-11-27T16:23:04,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-11-27T16:23:04,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-11-27T16:23:04,068 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-27T16:23:04,068 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3830 sec 2024-11-27T16:23:04,069 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.3870 sec 2024-11-27T16:23:04,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:04,110 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-27T16:23:04,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:04,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:04,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:04,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:04,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:04,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:04,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/8ceff44c27d64b97b7c6930348021134 is 50, key is test_row_0/A:col10/1732724584109/Put/seqid=0 2024-11-27T16:23:04,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742348_1524 (size=14391) 2024-11-27T16:23:04,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/8ceff44c27d64b97b7c6930348021134 2024-11-27T16:23:04,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/1c49febce62f4dc69e9c789364f1b733 is 50, key is test_row_0/B:col10/1732724584109/Put/seqid=0 2024-11-27T16:23:04,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to 
blk_1073742349_1525 (size=12051) 2024-11-27T16:23:04,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724644141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724644142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724644143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724644144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724644144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724644249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724644250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724644250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724644251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724644251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,361 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/d3f30e7e760a4225b9c2d3b2599c943e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/d3f30e7e760a4225b9c2d3b2599c943e 2024-11-27T16:23:04,365 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/C of 00f170535dc2739662302d98f22dc172 into d3f30e7e760a4225b9c2d3b2599c943e(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:04,365 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:04,365 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/C, priority=13, startTime=1732724583470; duration=0sec 2024-11-27T16:23:04,365 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:04,365 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:04,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724644455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724644455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724644455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724644457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724644457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,533 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/1c49febce62f4dc69e9c789364f1b733 2024-11-27T16:23:04,540 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/db9576b0a23c42a6a00a197db4a989c5 is 50, key is test_row_0/C:col10/1732724584109/Put/seqid=0 2024-11-27T16:23:04,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742350_1526 (size=12051) 2024-11-27T16:23:04,562 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/db9576b0a23c42a6a00a197db4a989c5 2024-11-27T16:23:04,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/8ceff44c27d64b97b7c6930348021134 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/8ceff44c27d64b97b7c6930348021134 2024-11-27T16:23:04,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/8ceff44c27d64b97b7c6930348021134, entries=200, sequenceid=131, filesize=14.1 K 2024-11-27T16:23:04,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/1c49febce62f4dc69e9c789364f1b733 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c49febce62f4dc69e9c789364f1b733 2024-11-27T16:23:04,575 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c49febce62f4dc69e9c789364f1b733, entries=150, sequenceid=131, filesize=11.8 K 2024-11-27T16:23:04,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/db9576b0a23c42a6a00a197db4a989c5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/db9576b0a23c42a6a00a197db4a989c5 2024-11-27T16:23:04,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/db9576b0a23c42a6a00a197db4a989c5, entries=150, sequenceid=131, filesize=11.8 K 2024-11-27T16:23:04,579 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 00f170535dc2739662302d98f22dc172 in 469ms, sequenceid=131, compaction requested=true 2024-11-27T16:23:04,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:04,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:04,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:04,580 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:04,580 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:04,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:04,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:04,580 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:04,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:04,581 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38599 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:04,581 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:04,582 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files) 2024-11-27T16:23:04,582 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction (all files) 2024-11-27T16:23:04,582 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:04,582 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:04,582 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/202a5835ff2347a094a39485e09d0d05, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/abd2ccbf670c47f89a02dea2886333a8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/8ceff44c27d64b97b7c6930348021134] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=37.7 K 2024-11-27T16:23:04,582 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2b9929e0e6984d6e9208ee98a0ceeba3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/0ee44db78c9845d6b1b73872ee90757f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c49febce62f4dc69e9c789364f1b733] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=35.4 K 2024-11-27T16:23:04,582 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 202a5835ff2347a094a39485e09d0d05, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732724582493 2024-11-27T16:23:04,582 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b9929e0e6984d6e9208ee98a0ceeba3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732724582493 2024-11-27T16:23:04,582 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting abd2ccbf670c47f89a02dea2886333a8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732724582646 2024-11-27T16:23:04,582 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ee44db78c9845d6b1b73872ee90757f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732724582646 2024-11-27T16:23:04,583 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ceff44c27d64b97b7c6930348021134, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732724583792 2024-11-27T16:23:04,583 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c49febce62f4dc69e9c789364f1b733, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732724583793 2024-11-27T16:23:04,591 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#441 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:04,591 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/027b19a0d4854dd49aa8c5c779731eba is 50, key is test_row_0/A:col10/1732724584109/Put/seqid=0 2024-11-27T16:23:04,598 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#442 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:04,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742351_1527 (size=12359) 2024-11-27T16:23:04,598 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/1c5c35968da64675ae2f58218f9241a5 is 50, key is test_row_0/B:col10/1732724584109/Put/seqid=0 2024-11-27T16:23:04,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742352_1528 (size=12359) 2024-11-27T16:23:04,611 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/1c5c35968da64675ae2f58218f9241a5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c5c35968da64675ae2f58218f9241a5 2024-11-27T16:23:04,614 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into 1c5c35968da64675ae2f58218f9241a5(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:04,614 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:04,614 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=13, startTime=1732724584580; duration=0sec 2024-11-27T16:23:04,615 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:04,615 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:04,615 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:04,615 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:04,615 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/C is initiating minor compaction (all files) 2024-11-27T16:23:04,615 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/C in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:04,616 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/d3f30e7e760a4225b9c2d3b2599c943e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/27a6d4aae8904a31a0f5a5c2c19fafdc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/db9576b0a23c42a6a00a197db4a989c5] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=35.4 K 2024-11-27T16:23:04,616 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d3f30e7e760a4225b9c2d3b2599c943e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732724582493 2024-11-27T16:23:04,617 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 27a6d4aae8904a31a0f5a5c2c19fafdc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732724582646 2024-11-27T16:23:04,617 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting db9576b0a23c42a6a00a197db4a989c5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732724583793 2024-11-27T16:23:04,623 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#C#compaction#443 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:04,624 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/8e9827543ccf4ab291448b32af8e18c0 is 50, key is test_row_0/C:col10/1732724584109/Put/seqid=0 2024-11-27T16:23:04,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742353_1529 (size=12359) 2024-11-27T16:23:04,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:04,768 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-27T16:23:04,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:04,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:04,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:04,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:04,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:04,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:04,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/173e5aa140f34d1b96fa583fb7d873f7 is 50, key is test_row_0/A:col10/1732724584143/Put/seqid=0 2024-11-27T16:23:04,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742354_1530 (size=14541) 2024-11-27T16:23:04,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-27T16:23:04,787 INFO [Thread-2216 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-27T16:23:04,788 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:04,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-27T16:23:04,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-27T16:23:04,789 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:04,790 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:04,790 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:04,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724644783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724644783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724644784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724644788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724644791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-27T16:23:04,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724644892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724644892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724644892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724644895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:04,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724644899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,942 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:04,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-27T16:23:04,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:04,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:04,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:04,943 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:04,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:04,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:05,003 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/027b19a0d4854dd49aa8c5c779731eba as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/027b19a0d4854dd49aa8c5c779731eba 2024-11-27T16:23:05,007 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into 027b19a0d4854dd49aa8c5c779731eba(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:05,007 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:05,007 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=13, startTime=1732724584579; duration=0sec 2024-11-27T16:23:05,007 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:05,007 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:05,031 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/8e9827543ccf4ab291448b32af8e18c0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/8e9827543ccf4ab291448b32af8e18c0 2024-11-27T16:23:05,035 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/C of 00f170535dc2739662302d98f22dc172 into 8e9827543ccf4ab291448b32af8e18c0(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:23:05,035 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:05,035 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/C, priority=13, startTime=1732724584580; duration=0sec 2024-11-27T16:23:05,035 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:05,035 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:05,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-27T16:23:05,095 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,095 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-27T16:23:05,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:05,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:05,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:05,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:05,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:05,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:05,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724645100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724645101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724645101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724645101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724645106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,178 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/173e5aa140f34d1b96fa583fb7d873f7 2024-11-27T16:23:05,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/536f8ee4844d407c95eeeaec941024c7 is 50, key is test_row_0/B:col10/1732724584143/Put/seqid=0 2024-11-27T16:23:05,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742355_1531 (size=12151) 2024-11-27T16:23:05,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/536f8ee4844d407c95eeeaec941024c7 2024-11-27T16:23:05,200 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/19dd11dbe9db49de8303b7cfb00240d3 is 50, key is test_row_0/C:col10/1732724584143/Put/seqid=0 2024-11-27T16:23:05,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742356_1532 (size=12151) 2024-11-27T16:23:05,248 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-27T16:23:05,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
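The RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes once the region's memstore passes its 512.0 K blocking limit; the stock HBase client normally treats this as a retriable error and backs off internally. Purely as an illustration of handling it explicitly (hypothetical retry policy and value bytes; row, family, and qualifier taken from the log), a sketch might be:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetry {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;                                  // write accepted
            } catch (RegionTooBusyException busy) {   // memstore above its blocking limit
              if (attempt >= 10) throw busy;          // bounded number of attempts
              Thread.sleep(100L * attempt);           // back off so the flush can catch up
            }
          }
        }
      }
    }

Whether the exception surfaces directly or wrapped by the client's own retry machinery depends on the client retry settings, so the catch clause here is illustrative only.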
2024-11-27T16:23:05,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:05,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:05,249 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:05,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:05,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:05,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-27T16:23:05,403 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-27T16:23:05,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:05,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:05,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:05,404 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:05,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:05,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:05,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724645405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724645406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724645407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724645407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724645413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,556 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-27T16:23:05,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:05,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:05,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:05,557 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:05,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:05,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:05,605 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/19dd11dbe9db49de8303b7cfb00240d3 2024-11-27T16:23:05,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/173e5aa140f34d1b96fa583fb7d873f7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/173e5aa140f34d1b96fa583fb7d873f7 2024-11-27T16:23:05,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/173e5aa140f34d1b96fa583fb7d873f7, entries=200, sequenceid=154, filesize=14.2 K 2024-11-27T16:23:05,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/536f8ee4844d407c95eeeaec941024c7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/536f8ee4844d407c95eeeaec941024c7 2024-11-27T16:23:05,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/536f8ee4844d407c95eeeaec941024c7, entries=150, 
sequenceid=154, filesize=11.9 K 2024-11-27T16:23:05,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/19dd11dbe9db49de8303b7cfb00240d3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/19dd11dbe9db49de8303b7cfb00240d3 2024-11-27T16:23:05,620 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/19dd11dbe9db49de8303b7cfb00240d3, entries=150, sequenceid=154, filesize=11.9 K 2024-11-27T16:23:05,621 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 00f170535dc2739662302d98f22dc172 in 853ms, sequenceid=154, compaction requested=false 2024-11-27T16:23:05,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:05,709 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-27T16:23:05,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
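The 512.0 K figure in the RegionTooBusyException warnings is the region's blocking memstore size, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the test environment has evidently lowered it far below the 128 MB default so that flushes and write blocking are easy to provoke. A small sketch of one configuration that would yield that limit (the exact values are an inference, not shown in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values consistent with the 512 K blocking limit seen above:
        // blocking limit = flush.size * block.multiplier = 128 KB * 4 = 512 KB.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long limitKb = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4) / 1024;
        System.out.println("memstore blocking limit = " + limitKb + " K");
      }
    }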
2024-11-27T16:23:05,710 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-27T16:23:05,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:05,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:05,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:05,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:05,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:05,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:05,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/d04b564a13ca4e579fe1a63d84755b95 is 50, key is test_row_0/A:col10/1732724584789/Put/seqid=0 2024-11-27T16:23:05,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742357_1533 (size=12151) 2024-11-27T16:23:05,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-27T16:23:05,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:05,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:05,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724645947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724645948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724645949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724645951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:05,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:05,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724645951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,006 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-27T16:23:06,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724646057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724646057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724646060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724646061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724646061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,119 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/d04b564a13ca4e579fe1a63d84755b95 2024-11-27T16:23:06,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/aebfa2b0c5484d268712be576dfe8961 is 50, key is test_row_0/B:col10/1732724584789/Put/seqid=0 2024-11-27T16:23:06,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742358_1534 (size=12151) 2024-11-27T16:23:06,139 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/aebfa2b0c5484d268712be576dfe8961 2024-11-27T16:23:06,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/2d9bb4bd29d1480f9a8fba2008205144 is 50, key is 
test_row_0/C:col10/1732724584789/Put/seqid=0 2024-11-27T16:23:06,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742359_1535 (size=12151) 2024-11-27T16:23:06,159 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/2d9bb4bd29d1480f9a8fba2008205144 2024-11-27T16:23:06,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/d04b564a13ca4e579fe1a63d84755b95 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d04b564a13ca4e579fe1a63d84755b95 2024-11-27T16:23:06,179 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d04b564a13ca4e579fe1a63d84755b95, entries=150, sequenceid=171, filesize=11.9 K 2024-11-27T16:23:06,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/aebfa2b0c5484d268712be576dfe8961 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/aebfa2b0c5484d268712be576dfe8961 2024-11-27T16:23:06,183 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/aebfa2b0c5484d268712be576dfe8961, entries=150, sequenceid=171, filesize=11.9 K 2024-11-27T16:23:06,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/2d9bb4bd29d1480f9a8fba2008205144 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2d9bb4bd29d1480f9a8fba2008205144 2024-11-27T16:23:06,187 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2d9bb4bd29d1480f9a8fba2008205144, entries=150, sequenceid=171, filesize=11.9 K 2024-11-27T16:23:06,188 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 
{event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 00f170535dc2739662302d98f22dc172 in 478ms, sequenceid=171, compaction requested=true 2024-11-27T16:23:06,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:06,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:06,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-27T16:23:06,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-27T16:23:06,190 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-27T16:23:06,190 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3990 sec 2024-11-27T16:23:06,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.4020 sec 2024-11-27T16:23:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:06,267 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T16:23:06,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:06,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:06,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:06,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:06,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:06,268 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:06,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/bf69d9ce36f4465596589255c77d53d0 is 50, key is test_row_0/A:col10/1732724585931/Put/seqid=0 2024-11-27T16:23:06,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742360_1536 (size=14541) 2024-11-27T16:23:06,286 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=194 
(bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/bf69d9ce36f4465596589255c77d53d0 2024-11-27T16:23:06,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724646278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724646277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724646279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724646280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724646280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/40ad39a02eb545bea4cb7160c8428f0b is 50, key is test_row_0/B:col10/1732724585931/Put/seqid=0 2024-11-27T16:23:06,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742361_1537 (size=12151) 2024-11-27T16:23:06,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724646390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724646390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724646390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724646390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724646390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724646596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724646596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724646596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724646596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724646597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,696 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/40ad39a02eb545bea4cb7160c8428f0b 2024-11-27T16:23:06,703 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/20faed288a4c4b2aae624e73c58c80e2 is 50, key is test_row_0/C:col10/1732724585931/Put/seqid=0 2024-11-27T16:23:06,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742362_1538 (size=12151) 2024-11-27T16:23:06,708 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/20faed288a4c4b2aae624e73c58c80e2 2024-11-27T16:23:06,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/bf69d9ce36f4465596589255c77d53d0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/bf69d9ce36f4465596589255c77d53d0 2024-11-27T16:23:06,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/bf69d9ce36f4465596589255c77d53d0, entries=200, sequenceid=194, filesize=14.2 K 2024-11-27T16:23:06,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/40ad39a02eb545bea4cb7160c8428f0b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/40ad39a02eb545bea4cb7160c8428f0b 2024-11-27T16:23:06,720 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/40ad39a02eb545bea4cb7160c8428f0b, entries=150, sequenceid=194, filesize=11.9 K 2024-11-27T16:23:06,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/20faed288a4c4b2aae624e73c58c80e2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/20faed288a4c4b2aae624e73c58c80e2 2024-11-27T16:23:06,723 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/20faed288a4c4b2aae624e73c58c80e2, entries=150, sequenceid=194, filesize=11.9 K 2024-11-27T16:23:06,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 00f170535dc2739662302d98f22dc172 in 457ms, sequenceid=194, compaction requested=true 2024-11-27T16:23:06,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:06,724 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:06,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:06,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:06,725 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:06,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:06,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:06,725 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:06,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:06,726 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:06,726 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53592 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:06,726 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files) 2024-11-27T16:23:06,726 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction (all files) 2024-11-27T16:23:06,726 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:06,726 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:06,727 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/027b19a0d4854dd49aa8c5c779731eba, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/173e5aa140f34d1b96fa583fb7d873f7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d04b564a13ca4e579fe1a63d84755b95, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/bf69d9ce36f4465596589255c77d53d0] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=52.3 K 2024-11-27T16:23:06,727 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c5c35968da64675ae2f58218f9241a5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/536f8ee4844d407c95eeeaec941024c7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/aebfa2b0c5484d268712be576dfe8961, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/40ad39a02eb545bea4cb7160c8428f0b] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=47.7 K 2024-11-27T16:23:06,727 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c5c35968da64675ae2f58218f9241a5, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732724583793 2024-11-27T16:23:06,727 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 027b19a0d4854dd49aa8c5c779731eba, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732724583793 2024-11-27T16:23:06,727 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 173e5aa140f34d1b96fa583fb7d873f7, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732724584136 2024-11-27T16:23:06,727 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 536f8ee4844d407c95eeeaec941024c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732724584136 2024-11-27T16:23:06,728 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d04b564a13ca4e579fe1a63d84755b95, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732724584782 2024-11-27T16:23:06,728 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting aebfa2b0c5484d268712be576dfe8961, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732724584782 2024-11-27T16:23:06,728 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf69d9ce36f4465596589255c77d53d0, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732724585931 2024-11-27T16:23:06,728 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 40ad39a02eb545bea4cb7160c8428f0b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732724585931 2024-11-27T16:23:06,737 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#453 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:06,738 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/2889764122b6435ca4942242bbda25ec is 50, key is test_row_0/B:col10/1732724585931/Put/seqid=0 2024-11-27T16:23:06,738 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#454 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:06,739 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/a9eb5eb61caa4d80b3fd908872a17c8e is 50, key is test_row_0/A:col10/1732724585931/Put/seqid=0 2024-11-27T16:23:06,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742363_1539 (size=12595) 2024-11-27T16:23:06,750 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/2889764122b6435ca4942242bbda25ec as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2889764122b6435ca4942242bbda25ec 2024-11-27T16:23:06,755 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into 2889764122b6435ca4942242bbda25ec(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:06,755 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:06,755 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=12, startTime=1732724586725; duration=0sec 2024-11-27T16:23:06,755 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:06,755 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:06,755 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:06,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742364_1540 (size=12595) 2024-11-27T16:23:06,761 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:06,761 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/C is initiating minor compaction (all files) 2024-11-27T16:23:06,761 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/C in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
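The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" lines above come from the region server weighing candidate store-file sets against a size ratio before handing them to the compactor. As a rough, hedged illustration only (this is not the actual HBase implementation; the helper names and the 1.2 ratio are assumptions), the core "files in ratio" test behaves roughly like this:

```java
import java.util.List;

// Simplified sketch of the "in ratio" check used when exploring candidate
// compaction selections: every file must be no larger than `ratio` times the
// combined size of the other files in the candidate set. Class/method names
// and the 1.2 default are illustrative assumptions, not real HBase classes.
public class RatioCheckSketch {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true; // a single file is trivially in ratio
    }
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false; // this file dwarfs the rest; drop the selection
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes loosely modelled on the four A-store files in the log (bytes).
    List<Long> candidate = List.of(12_400L, 14_500L, 12_200L, 14_500L);
    System.out.println(filesInRatio(candidate, 1.2)); // true -> selection is eligible
  }
}
```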
2024-11-27T16:23:06,761 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/8e9827543ccf4ab291448b32af8e18c0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/19dd11dbe9db49de8303b7cfb00240d3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2d9bb4bd29d1480f9a8fba2008205144, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/20faed288a4c4b2aae624e73c58c80e2] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=47.7 K 2024-11-27T16:23:06,761 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e9827543ccf4ab291448b32af8e18c0, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732724583793 2024-11-27T16:23:06,761 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 19dd11dbe9db49de8303b7cfb00240d3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732724584136 2024-11-27T16:23:06,762 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d9bb4bd29d1480f9a8fba2008205144, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732724584782 2024-11-27T16:23:06,762 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 20faed288a4c4b2aae624e73c58c80e2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732724585931 2024-11-27T16:23:06,770 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#C#compaction#455 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:06,770 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/1046c5ead2e841f688deba7486999267 is 50, key is test_row_0/C:col10/1732724585931/Put/seqid=0 2024-11-27T16:23:06,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742365_1541 (size=12595) 2024-11-27T16:23:06,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-27T16:23:06,894 INFO [Thread-2216 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-27T16:23:06,895 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:06,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-27T16:23:06,897 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:06,897 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:06,897 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:06,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-27T16:23:06,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:06,905 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T16:23:06,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:06,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:06,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:06,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:06,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:06,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
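The flush activity above (client request from jenkins, FlushTableProcedure pid=130, and the earlier "Operation: FLUSH ... procId: 128 completed") is driven from the client through the Admin API. A minimal sketch of that client side, assuming an hbase-site.xml on the classpath and using the table name from this test, looks like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of the client side of the flush requests seen above:
// Admin.flush(TableName) asks the master to run a flush procedure for the
// table (the FlushTableProcedure / FlushRegionProcedure pair in the log).
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes cluster config is on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the flush procedure reports completion, matching the
      // "Operation: FLUSH ... completed" line logged by HBaseAdmin$TableFuture.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```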
2024-11-27T16:23:06,910 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/b10e0d8cedc54ea3897de9a22d060719 is 50, key is test_row_0/A:col10/1732724586905/Put/seqid=0 2024-11-27T16:23:06,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742366_1542 (size=16931) 2024-11-27T16:23:06,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/b10e0d8cedc54ea3897de9a22d060719 2024-11-27T16:23:06,931 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/f61f2d6dcc5b4279860429e7e31b2c58 is 50, key is test_row_0/B:col10/1732724586905/Put/seqid=0 2024-11-27T16:23:06,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724646931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742367_1543 (size=12151) 2024-11-27T16:23:06,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724646934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/f61f2d6dcc5b4279860429e7e31b2c58 2024-11-27T16:23:06,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724646936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724646937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:06,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724646941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:06,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/314d9b4e2cfc40aaa2cf314f680ffc33 is 50, key is test_row_0/C:col10/1732724586905/Put/seqid=0 2024-11-27T16:23:06,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742368_1544 (size=12151) 2024-11-27T16:23:06,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/314d9b4e2cfc40aaa2cf314f680ffc33 2024-11-27T16:23:06,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/b10e0d8cedc54ea3897de9a22d060719 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/b10e0d8cedc54ea3897de9a22d060719 2024-11-27T16:23:06,967 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/b10e0d8cedc54ea3897de9a22d060719, entries=250, sequenceid=210, filesize=16.5 K 2024-11-27T16:23:06,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/f61f2d6dcc5b4279860429e7e31b2c58 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f61f2d6dcc5b4279860429e7e31b2c58 
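The repeated RegionTooBusyException warnings above are the region server pushing writers back while the region's memstores sit over the 512.0 K blocking limit; the stock HBase client already retries these internally, so the sketch below only makes that backoff explicit for illustration. It reuses the row/family/qualifier seen in the log keys (test_row_0, A, col10); the cell value, attempt count, and backoff numbers are assumptions.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch of an explicit backoff loop around a put, relevant only if
// the exception surfaces past the client's own retry policy.
public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException busy) {
          // Memstore above its blocking limit; wait for the flush to catch up.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}
```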
2024-11-27T16:23:06,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f61f2d6dcc5b4279860429e7e31b2c58, entries=150, sequenceid=210, filesize=11.9 K 2024-11-27T16:23:06,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/314d9b4e2cfc40aaa2cf314f680ffc33 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/314d9b4e2cfc40aaa2cf314f680ffc33 2024-11-27T16:23:06,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/314d9b4e2cfc40aaa2cf314f680ffc33, entries=150, sequenceid=210, filesize=11.9 K 2024-11-27T16:23:06,983 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 00f170535dc2739662302d98f22dc172 in 78ms, sequenceid=210, compaction requested=false 2024-11-27T16:23:06,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:07,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-27T16:23:07,050 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-27T16:23:07,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
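The "Over memstore limit=512.0 K" figure in the warnings is the per-region blocking threshold, which (presumably, given this test's deliberately small flush size) is the memstore flush size multiplied by the block multiplier. A hedged sketch of the two settings involved, with small test-style values chosen purely so the product matches the 512 K seen in the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the knobs behind the 512.0 K blocking limit: writes are rejected
// with RegionTooBusyException once a region's memstores exceed
// flush.size * block.multiplier. Values below are illustrative, not advice.
public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // 128 K flush threshold (assumed test value)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block writes at 4x the flush size
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking limit (bytes): " + blockingLimit);  // 524288 = 512.0 K
  }
}
```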
2024-11-27T16:23:07,051 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-27T16:23:07,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:07,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:07,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:07,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:07,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:07,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:07,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:07,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:07,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/f05cd7f7472549b0b9cf09365ce5125a is 50, key is test_row_0/A:col10/1732724586936/Put/seqid=0 2024-11-27T16:23:07,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724647070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724647071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724647074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724647079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724647082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742369_1545 (size=12151) 2024-11-27T16:23:07,095 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/f05cd7f7472549b0b9cf09365ce5125a 2024-11-27T16:23:07,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/d6151e51a3c0447c902244f28370520f is 50, key is test_row_0/B:col10/1732724586936/Put/seqid=0 2024-11-27T16:23:07,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742370_1546 (size=12151) 2024-11-27T16:23:07,168 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/a9eb5eb61caa4d80b3fd908872a17c8e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a9eb5eb61caa4d80b3fd908872a17c8e 2024-11-27T16:23:07,174 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) 
file(s) in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into a9eb5eb61caa4d80b3fd908872a17c8e(size=12.3 K), total size for store is 28.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:07,174 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:07,174 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=12, startTime=1732724586724; duration=0sec 2024-11-27T16:23:07,174 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:07,174 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:07,182 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/1046c5ead2e841f688deba7486999267 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/1046c5ead2e841f688deba7486999267 2024-11-27T16:23:07,187 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 00f170535dc2739662302d98f22dc172/C of 00f170535dc2739662302d98f22dc172 into 1046c5ead2e841f688deba7486999267(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:07,187 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:07,187 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/C, priority=12, startTime=1732724586725; duration=0sec 2024-11-27T16:23:07,188 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:07,188 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:07,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-27T16:23:07,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724647188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724647188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724647194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724647194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724647212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724647408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724647408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724647409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724647421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724647423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-27T16:23:07,548 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/d6151e51a3c0447c902244f28370520f 2024-11-27T16:23:07,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/43d1ef25bef2444dba94850911364d33 is 50, key is test_row_0/C:col10/1732724586936/Put/seqid=0 2024-11-27T16:23:07,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742371_1547 (size=12151) 2024-11-27T16:23:07,615 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/43d1ef25bef2444dba94850911364d33 2024-11-27T16:23:07,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/f05cd7f7472549b0b9cf09365ce5125a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f05cd7f7472549b0b9cf09365ce5125a 2024-11-27T16:23:07,636 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f05cd7f7472549b0b9cf09365ce5125a, entries=150, sequenceid=231, filesize=11.9 K 2024-11-27T16:23:07,637 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/d6151e51a3c0447c902244f28370520f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d6151e51a3c0447c902244f28370520f 2024-11-27T16:23:07,642 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d6151e51a3c0447c902244f28370520f, entries=150, sequenceid=231, filesize=11.9 K 2024-11-27T16:23:07,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/43d1ef25bef2444dba94850911364d33 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/43d1ef25bef2444dba94850911364d33 2024-11-27T16:23:07,649 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/43d1ef25bef2444dba94850911364d33, entries=150, sequenceid=231, filesize=11.9 K 2024-11-27T16:23:07,650 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 00f170535dc2739662302d98f22dc172 in 599ms, sequenceid=231, compaction requested=true 2024-11-27T16:23:07,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:07,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:07,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-27T16:23:07,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-27T16:23:07,653 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-27T16:23:07,653 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 755 msec 2024-11-27T16:23:07,654 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 758 msec 2024-11-27T16:23:07,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:07,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-27T16:23:07,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:07,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:07,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:07,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:07,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:07,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:07,741 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/01c66ad47c024f4cab1cf14f7b8dee47 is 50, key is test_row_0/A:col10/1732724587725/Put/seqid=0 2024-11-27T16:23:07,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742372_1548 (size=14541) 2024-11-27T16:23:07,789 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/01c66ad47c024f4cab1cf14f7b8dee47 2024-11-27T16:23:07,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724647782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/1888098ced4d406fb3465a20238f6b32 is 50, key is test_row_0/B:col10/1732724587725/Put/seqid=0 2024-11-27T16:23:07,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724647785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724647785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724647787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724647784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742373_1549 (size=12151) 2024-11-27T16:23:07,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724647901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724647911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724647911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724647912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:07,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:07,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724647912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-27T16:23:08,003 INFO [Thread-2216 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-27T16:23:08,004 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:08,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-27T16:23:08,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-27T16:23:08,005 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:08,006 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:08,006 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:08,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-27T16:23:08,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724648107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724648119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724648127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724648129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724648130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,157 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-27T16:23:08,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:08,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:08,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:08,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:08,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:08,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/1888098ced4d406fb3465a20238f6b32 2024-11-27T16:23:08,272 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/cab721fa76c04a3db7a5ef73194c2bf1 is 50, key is test_row_0/C:col10/1732724587725/Put/seqid=0 2024-11-27T16:23:08,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742374_1550 (size=12151) 2024-11-27T16:23:08,303 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/cab721fa76c04a3db7a5ef73194c2bf1 2024-11-27T16:23:08,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-27T16:23:08,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/01c66ad47c024f4cab1cf14f7b8dee47 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/01c66ad47c024f4cab1cf14f7b8dee47 2024-11-27T16:23:08,310 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-27T16:23:08,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:08,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:08,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:08,311 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:08,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:08,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:08,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/01c66ad47c024f4cab1cf14f7b8dee47, entries=200, sequenceid=249, filesize=14.2 K 2024-11-27T16:23:08,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/1888098ced4d406fb3465a20238f6b32 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1888098ced4d406fb3465a20238f6b32 2024-11-27T16:23:08,323 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1888098ced4d406fb3465a20238f6b32, entries=150, sequenceid=249, filesize=11.9 K 2024-11-27T16:23:08,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/cab721fa76c04a3db7a5ef73194c2bf1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cab721fa76c04a3db7a5ef73194c2bf1 2024-11-27T16:23:08,333 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cab721fa76c04a3db7a5ef73194c2bf1, entries=150, sequenceid=249, filesize=11.9 K 
2024-11-27T16:23:08,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 00f170535dc2739662302d98f22dc172 in 608ms, sequenceid=249, compaction requested=true 2024-11-27T16:23:08,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:08,334 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:08,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:08,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:08,335 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:08,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:08,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:08,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:08,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:08,338 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 56218 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:08,338 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files) 2024-11-27T16:23:08,338 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:08,338 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a9eb5eb61caa4d80b3fd908872a17c8e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/b10e0d8cedc54ea3897de9a22d060719, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f05cd7f7472549b0b9cf09365ce5125a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/01c66ad47c024f4cab1cf14f7b8dee47] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=54.9 K 2024-11-27T16:23:08,339 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9eb5eb61caa4d80b3fd908872a17c8e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732724585931 2024-11-27T16:23:08,339 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:08,339 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction (all files) 2024-11-27T16:23:08,339 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:08,339 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2889764122b6435ca4942242bbda25ec, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f61f2d6dcc5b4279860429e7e31b2c58, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d6151e51a3c0447c902244f28370520f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1888098ced4d406fb3465a20238f6b32] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=47.9 K 2024-11-27T16:23:08,340 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting b10e0d8cedc54ea3897de9a22d060719, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732724586278 2024-11-27T16:23:08,340 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f05cd7f7472549b0b9cf09365ce5125a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1732724586930 2024-11-27T16:23:08,340 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 2889764122b6435ca4942242bbda25ec, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732724585931 2024-11-27T16:23:08,341 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f61f2d6dcc5b4279860429e7e31b2c58, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732724586279 2024-11-27T16:23:08,342 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01c66ad47c024f4cab1cf14f7b8dee47, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724587072 2024-11-27T16:23:08,342 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d6151e51a3c0447c902244f28370520f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1732724586930 2024-11-27T16:23:08,343 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1888098ced4d406fb3465a20238f6b32, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724587076 2024-11-27T16:23:08,362 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#465 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:08,363 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/ed34516e66b6428bbd1d8e513f925009 is 50, key is test_row_0/A:col10/1732724587725/Put/seqid=0 2024-11-27T16:23:08,375 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#466 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:08,375 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/6fd143f2831c4ec584f85039a5c43041 is 50, key is test_row_0/B:col10/1732724587725/Put/seqid=0 2024-11-27T16:23:08,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742375_1551 (size=12731) 2024-11-27T16:23:08,424 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/ed34516e66b6428bbd1d8e513f925009 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/ed34516e66b6428bbd1d8e513f925009 2024-11-27T16:23:08,426 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-27T16:23:08,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:08,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:08,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:08,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:08,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:08,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:08,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:08,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/faffc142ce0940168c65558d0c4a1e73 is 50, key is test_row_0/A:col10/1732724588423/Put/seqid=0 2024-11-27T16:23:08,438 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) 
in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into ed34516e66b6428bbd1d8e513f925009(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:08,438 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:08,438 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=12, startTime=1732724588334; duration=0sec 2024-11-27T16:23:08,439 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:08,439 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:08,439 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:08,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742376_1552 (size=12731) 2024-11-27T16:23:08,442 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:08,442 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/C is initiating minor compaction (all files) 2024-11-27T16:23:08,442 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/C in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:08,443 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/1046c5ead2e841f688deba7486999267, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/314d9b4e2cfc40aaa2cf314f680ffc33, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/43d1ef25bef2444dba94850911364d33, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cab721fa76c04a3db7a5ef73194c2bf1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=47.9 K 2024-11-27T16:23:08,443 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1046c5ead2e841f688deba7486999267, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732724585931 2024-11-27T16:23:08,443 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 314d9b4e2cfc40aaa2cf314f680ffc33, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732724586279 2024-11-27T16:23:08,444 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43d1ef25bef2444dba94850911364d33, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1732724586930 2024-11-27T16:23:08,444 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting cab721fa76c04a3db7a5ef73194c2bf1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724587076 2024-11-27T16:23:08,463 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-27T16:23:08,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:08,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:08,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:08,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:08,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:08,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:08,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724648459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724648460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742377_1553 (size=14741) 2024-11-27T16:23:08,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724648462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,471 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/faffc142ce0940168c65558d0c4a1e73 2024-11-27T16:23:08,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724648464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724648466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,483 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#C#compaction#468 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:08,483 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/bf08b6c45de542c5bc5a4303df36b7af is 50, key is test_row_0/C:col10/1732724587725/Put/seqid=0 2024-11-27T16:23:08,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/d7ca5b78a16b4ae688deb62684eb0747 is 50, key is test_row_0/B:col10/1732724588423/Put/seqid=0 2024-11-27T16:23:08,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742378_1554 (size=12731) 2024-11-27T16:23:08,542 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/bf08b6c45de542c5bc5a4303df36b7af as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bf08b6c45de542c5bc5a4303df36b7af 2024-11-27T16:23:08,549 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 00f170535dc2739662302d98f22dc172/C of 00f170535dc2739662302d98f22dc172 into bf08b6c45de542c5bc5a4303df36b7af(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:08,549 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:08,549 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/C, priority=12, startTime=1732724588335; duration=0sec 2024-11-27T16:23:08,549 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:08,550 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:08,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742379_1555 (size=12301) 2024-11-27T16:23:08,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724648570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724648571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724648571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724648581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724648581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-27T16:23:08,617 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-27T16:23:08,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:08,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:08,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:08,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:08,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:08,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:08,771 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,771 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-27T16:23:08,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:08,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:08,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:08,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:08,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:08,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:08,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724648784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724648784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724648785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:08,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724648789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:08,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:23:08,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724648797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967
2024-11-27T16:23:08,846 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/6fd143f2831c4ec584f85039a5c43041 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6fd143f2831c4ec584f85039a5c43041
2024-11-27T16:23:08,851 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into 6fd143f2831c4ec584f85039a5c43041(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T16:23:08,851 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172:
2024-11-27T16:23:08,851 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=12, startTime=1732724588335; duration=0sec
2024-11-27T16:23:08,851 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T16:23:08,851 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B
2024-11-27T16:23:08,924 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967
2024-11-27T16:23:08,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133
2024-11-27T16:23:08,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.
2024-11-27T16:23:08,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing
2024-11-27T16:23:08,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.
2024-11-27T16:23:08,943 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133
java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T16:23:08,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:08,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:08,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/d7ca5b78a16b4ae688deb62684eb0747 2024-11-27T16:23:08,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/3441ad56dc0044d4badd5e09d7e6dcff is 50, key is test_row_0/C:col10/1732724588423/Put/seqid=0 2024-11-27T16:23:09,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742380_1556 (size=12301) 2024-11-27T16:23:09,096 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,097 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-27T16:23:09,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:09,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:09,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:09,097 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:09,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:09,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:09,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724649096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724649094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724649096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724649100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-27T16:23:09,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724649106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,249 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,250 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-27T16:23:09,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:09,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:09,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:09,250 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:09,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:09,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:09,402 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-27T16:23:09,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:09,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:09,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:09,404 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:09,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:09,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:09,409 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/3441ad56dc0044d4badd5e09d7e6dcff
2024-11-27T16:23:09,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/faffc142ce0940168c65558d0c4a1e73 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/faffc142ce0940168c65558d0c4a1e73
2024-11-27T16:23:09,421 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/faffc142ce0940168c65558d0c4a1e73, entries=200, sequenceid=270, filesize=14.4 K
2024-11-27T16:23:09,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/d7ca5b78a16b4ae688deb62684eb0747 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d7ca5b78a16b4ae688deb62684eb0747
2024-11-27T16:23:09,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d7ca5b78a16b4ae688deb62684eb0747, entries=150, sequenceid=270, filesize=12.0 K
2024-11-27T16:23:09,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/3441ad56dc0044d4badd5e09d7e6dcff as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/3441ad56dc0044d4badd5e09d7e6dcff
2024-11-27T16:23:09,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/3441ad56dc0044d4badd5e09d7e6dcff, entries=150, sequenceid=270, filesize=12.0 K
2024-11-27T16:23:09,436 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 00f170535dc2739662302d98f22dc172 in 1010ms, sequenceid=270, compaction requested=false
2024-11-27T16:23:09,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172:
2024-11-27T16:23:09,558 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967
2024-11-27T16:23:09,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133
2024-11-27T16:23:09,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.
2024-11-27T16:23:09,559 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB
2024-11-27T16:23:09,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A
2024-11-27T16:23:09,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:23:09,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B
2024-11-27T16:23:09,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:23:09,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C
2024-11-27T16:23:09,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:23:09,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/33939db86a7c4d59a6de3f1599b59ca8 is 50, key is test_row_0/A:col10/1732724588464/Put/seqid=0
2024-11-27T16:23:09,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.
as already flushing 2024-11-27T16:23:09,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:09,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742381_1557 (size=12301) 2024-11-27T16:23:09,630 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/33939db86a7c4d59a6de3f1599b59ca8 2024-11-27T16:23:09,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/0339fd9cc08045e1bffab4f8d96b322d is 50, key is test_row_0/B:col10/1732724588464/Put/seqid=0 2024-11-27T16:23:09,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724649658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724649660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724649661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724649670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724649669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742382_1558 (size=12301) 2024-11-27T16:23:09,687 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/0339fd9cc08045e1bffab4f8d96b322d 2024-11-27T16:23:09,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/aa00dc7007ad490298c8a020931bd42e is 50, key is test_row_0/C:col10/1732724588464/Put/seqid=0 2024-11-27T16:23:09,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742383_1559 (size=12301) 2024-11-27T16:23:09,718 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/aa00dc7007ad490298c8a020931bd42e 2024-11-27T16:23:09,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/33939db86a7c4d59a6de3f1599b59ca8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/33939db86a7c4d59a6de3f1599b59ca8 2024-11-27T16:23:09,730 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/33939db86a7c4d59a6de3f1599b59ca8, entries=150, sequenceid=288, filesize=12.0 K 2024-11-27T16:23:09,733 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/0339fd9cc08045e1bffab4f8d96b322d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/0339fd9cc08045e1bffab4f8d96b322d 2024-11-27T16:23:09,738 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/0339fd9cc08045e1bffab4f8d96b322d, entries=150, sequenceid=288, filesize=12.0 K 2024-11-27T16:23:09,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/aa00dc7007ad490298c8a020931bd42e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/aa00dc7007ad490298c8a020931bd42e 2024-11-27T16:23:09,747 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/aa00dc7007ad490298c8a020931bd42e, entries=150, sequenceid=288, filesize=12.0 K 2024-11-27T16:23:09,749 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 00f170535dc2739662302d98f22dc172 in 190ms, sequenceid=288, compaction requested=true 2024-11-27T16:23:09,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:09,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
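The RegionTooBusyException warnings interleaved with this flush come from HRegion.checkResources(), which rejects new mutations once the region's memstore passes its blocking limit of flush size times block multiplier; the "Over memstore limit=512.0 K" figure above is that product for this test region. A minimal client-side sketch in Java, assuming a reachable cluster and reusing the TestAcidGuarantees table, row, and family names from this log (the 128 KB flush size in the comment is an inference, only the 512 K product appears in the log):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();

        // The blocking limit enforced by HRegion.checkResources() is
        // flush size * block multiplier. A 512 K limit, as logged above,
        // would follow from e.g. a 128 KB flush size with the default
        // multiplier of 4 (an assumption; the log only shows the product).
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("memstore blocking limit = " + (flushSize * multiplier) + " bytes");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          try {
            table.put(put);
          } catch (IOException e) {
            // While the region is over its blocking limit the server answers
            // with RegionTooBusyException, as seen repeatedly in the log above.
            System.err.println("write rejected while the region was flushing: " + e.getMessage());
          }
        }
      }
    }

The explicit catch is only to make the failure mode visible; in practice the HBase client will normally retry RegionTooBusyException responses on its own before surfacing an error to the caller, which is why the test's writers keep reappearing in the log with new callIds.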
2024-11-27T16:23:09,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-27T16:23:09,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-27T16:23:09,754 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-27T16:23:09,754 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7450 sec 2024-11-27T16:23:09,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.7510 sec 2024-11-27T16:23:09,779 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-27T16:23:09,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:09,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:09,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:09,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:09,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:09,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:09,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:09,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/a50eeb12c4ce4f9598ccdf3d679eb990 is 50, key is test_row_0/A:col10/1732724589666/Put/seqid=0 2024-11-27T16:23:09,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724649801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724649809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724649815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724649816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724649816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742384_1560 (size=14741) 2024-11-27T16:23:09,834 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/a50eeb12c4ce4f9598ccdf3d679eb990 2024-11-27T16:23:09,847 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/e5b45599a6e3479fb1462d100260bee8 is 50, key is test_row_0/B:col10/1732724589666/Put/seqid=0 2024-11-27T16:23:09,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742385_1561 (size=12301) 2024-11-27T16:23:09,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724649919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724649919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724649930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724649932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:09,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:09,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724649934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-27T16:23:10,121 INFO [Thread-2216 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-27T16:23:10,122 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:10,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-27T16:23:10,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-27T16:23:10,124 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:10,124 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:10,124 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:10,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724650128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724650128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724650138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,157 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724650141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724650143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-27T16:23:10,260 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/e5b45599a6e3479fb1462d100260bee8 2024-11-27T16:23:10,275 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-27T16:23:10,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:10,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:10,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:10,276 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:10,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:10,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:10,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/8ebf6f1914084a51aeb9fe8fe02c5faf is 50, key is test_row_0/C:col10/1732724589666/Put/seqid=0 2024-11-27T16:23:10,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742386_1562 (size=12301) 2024-11-27T16:23:10,316 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/8ebf6f1914084a51aeb9fe8fe02c5faf 2024-11-27T16:23:10,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/a50eeb12c4ce4f9598ccdf3d679eb990 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a50eeb12c4ce4f9598ccdf3d679eb990 2024-11-27T16:23:10,327 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a50eeb12c4ce4f9598ccdf3d679eb990, entries=200, sequenceid=311, filesize=14.4 K 2024-11-27T16:23:10,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/e5b45599a6e3479fb1462d100260bee8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/e5b45599a6e3479fb1462d100260bee8 2024-11-27T16:23:10,339 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/e5b45599a6e3479fb1462d100260bee8, entries=150, sequenceid=311, filesize=12.0 K 2024-11-27T16:23:10,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/8ebf6f1914084a51aeb9fe8fe02c5faf as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/8ebf6f1914084a51aeb9fe8fe02c5faf 2024-11-27T16:23:10,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/8ebf6f1914084a51aeb9fe8fe02c5faf, entries=150, sequenceid=311, filesize=12.0 K 2024-11-27T16:23:10,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 00f170535dc2739662302d98f22dc172 in 577ms, sequenceid=311, compaction requested=true 2024-11-27T16:23:10,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:10,356 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:10,357 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54514 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:10,357 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files) 2024-11-27T16:23:10,358 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
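The shortCompactions thread has just picked all four HFiles of store A for a minor compaction: each flush above added one file, and once the store crossed the policy's minimum file count (hbase.hstore.compaction.min, default 3) the ExploringCompactionPolicy chose the cheapest qualifying set. A hedged sketch of driving the same work explicitly through the Admin API, reusing the table and family names from this log (the polling interval is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a compaction of column family A only, mirroring the
          // "00f170535dc2739662302d98f22dc172/A is initiating minor compaction" entry.
          admin.compact(table, Bytes.toBytes("A"));

          // Poll until the region server reports no compaction is running.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(200);
          }
        }
      }
    }

The thresholds that made the automatic selection fire (hbase.hstore.compaction.min and hbase.hstore.compaction.max) are read from the region server configuration, not from the client issuing the requests.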
2024-11-27T16:23:10,358 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/ed34516e66b6428bbd1d8e513f925009, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/faffc142ce0940168c65558d0c4a1e73, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/33939db86a7c4d59a6de3f1599b59ca8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a50eeb12c4ce4f9598ccdf3d679eb990] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=53.2 K 2024-11-27T16:23:10,358 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed34516e66b6428bbd1d8e513f925009, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724587076 2024-11-27T16:23:10,358 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting faffc142ce0940168c65558d0c4a1e73, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1732724587743 2024-11-27T16:23:10,359 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33939db86a7c4d59a6de3f1599b59ca8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732724588460 2024-11-27T16:23:10,359 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a50eeb12c4ce4f9598ccdf3d679eb990, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1732724589664 2024-11-27T16:23:10,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:10,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:10,361 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:10,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:10,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:10,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:10,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:10,377 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:10,378 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction (all files) 2024-11-27T16:23:10,378 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:10,378 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6fd143f2831c4ec584f85039a5c43041, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d7ca5b78a16b4ae688deb62684eb0747, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/0339fd9cc08045e1bffab4f8d96b322d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/e5b45599a6e3479fb1462d100260bee8] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=48.5 K 2024-11-27T16:23:10,378 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fd143f2831c4ec584f85039a5c43041, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724587076 2024-11-27T16:23:10,379 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d7ca5b78a16b4ae688deb62684eb0747, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1732724587743 2024-11-27T16:23:10,379 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0339fd9cc08045e1bffab4f8d96b322d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732724588460 2024-11-27T16:23:10,380 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting e5b45599a6e3479fb1462d100260bee8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1732724589666 2024-11-27T16:23:10,399 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#477 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:10,400 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/2091a4c80c6b4793acb1641a1364d922 is 50, key is test_row_0/A:col10/1732724589666/Put/seqid=0 2024-11-27T16:23:10,402 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#478 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:10,403 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/9aa1588197e347659688112ad43f5d17 is 50, key is test_row_0/B:col10/1732724589666/Put/seqid=0 2024-11-27T16:23:10,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-27T16:23:10,432 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-27T16:23:10,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:10,433 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T16:23:10,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:10,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:10,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:10,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:10,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:10,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:10,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:10,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:10,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742387_1563 (size=13017) 2024-11-27T16:23:10,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/2f4b913bddc74ea39c7b610cd249b1c5 is 50, key is test_row_0/A:col10/1732724589814/Put/seqid=0 2024-11-27T16:23:10,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742388_1564 (size=13017) 2024-11-27T16:23:10,469 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/9aa1588197e347659688112ad43f5d17 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/9aa1588197e347659688112ad43f5d17 2024-11-27T16:23:10,476 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into 9aa1588197e347659688112ad43f5d17(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
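[editor's note] The compaction selections recorded above ("Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking", ExploringCompactionPolicy picking 4 files) are governed by per-store file-count thresholds in the cluster configuration. The snippet below is only an illustrative sketch and is not part of this test run; the property names are standard HBase keys, but the values shown are arbitrary examples.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        // Example values only. The "16 blocking" figure in the log corresponds to
        // hbase.hstore.blockingStoreFiles: once a store accumulates that many files,
        // further flushes are delayed until compaction reduces the count.
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 4);      // minimum files before a minor compaction is considered
        conf.setInt("hbase.hstore.compaction.max", 10);     // maximum files combined in one minor compaction
        conf.setInt("hbase.hstore.blockingStoreFiles", 16); // store-file count at which flushes start blocking
    }
}
```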
2024-11-27T16:23:10,476 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:10,476 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=12, startTime=1732724590360; duration=0sec 2024-11-27T16:23:10,476 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:10,476 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:10,476 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:10,481 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:10,481 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/C is initiating minor compaction (all files) 2024-11-27T16:23:10,481 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/C in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:10,481 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bf08b6c45de542c5bc5a4303df36b7af, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/3441ad56dc0044d4badd5e09d7e6dcff, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/aa00dc7007ad490298c8a020931bd42e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/8ebf6f1914084a51aeb9fe8fe02c5faf] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=48.5 K 2024-11-27T16:23:10,481 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting bf08b6c45de542c5bc5a4303df36b7af, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732724587076 2024-11-27T16:23:10,482 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3441ad56dc0044d4badd5e09d7e6dcff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1732724587743 2024-11-27T16:23:10,482 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting aa00dc7007ad490298c8a020931bd42e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=288, earliestPutTs=1732724588460 2024-11-27T16:23:10,482 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ebf6f1914084a51aeb9fe8fe02c5faf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1732724589666 2024-11-27T16:23:10,495 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#C#compaction#480 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:10,495 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/325e959dab6a410d824f264134169cf5 is 50, key is test_row_0/C:col10/1732724589666/Put/seqid=0 2024-11-27T16:23:10,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742389_1565 (size=12301) 2024-11-27T16:23:10,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742390_1566 (size=13017) 2024-11-27T16:23:10,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724650548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724650553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724650554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724650558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724650559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724650661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724650672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724650672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724650672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724650673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-27T16:23:10,858 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/2091a4c80c6b4793acb1641a1364d922 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2091a4c80c6b4793acb1641a1364d922 2024-11-27T16:23:10,862 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into 2091a4c80c6b4793acb1641a1364d922(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:10,862 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:10,862 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=12, startTime=1732724590356; duration=0sec 2024-11-27T16:23:10,862 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:10,863 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:10,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724650871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724650881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724650881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724650881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:10,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724650885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:10,897 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/2f4b913bddc74ea39c7b610cd249b1c5 2024-11-27T16:23:10,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/1c37a4a5a94a416fa4baefdba30ad0cb is 50, key is test_row_0/B:col10/1732724589814/Put/seqid=0 2024-11-27T16:23:10,915 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/325e959dab6a410d824f264134169cf5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/325e959dab6a410d824f264134169cf5 2024-11-27T16:23:10,920 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 00f170535dc2739662302d98f22dc172/C of 00f170535dc2739662302d98f22dc172 into 325e959dab6a410d824f264134169cf5(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
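[editor's note] The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are thrown back to the writing clients while flushes and compactions catch up. The sketch below is a minimal, hypothetical client-side backoff loop around a single Put, using the public HBase client API; it is not the code this test harness actually runs (the stock client normally retries such exceptions internally before surfacing them).

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyRetrySketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row/column names mirror the keys seen in the log (test_row_0, family A, col10).
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Region is over its memstore limit; back off and let flushes/compactions drain it.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}
```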
2024-11-27T16:23:10,920 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:10,920 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/C, priority=12, startTime=1732724590361; duration=0sec 2024-11-27T16:23:10,920 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:10,920 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:10,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742391_1567 (size=12301) 2024-11-27T16:23:10,947 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/1c37a4a5a94a416fa4baefdba30ad0cb 2024-11-27T16:23:10,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/a0052fb15f9c469fa44cf1350f650d12 is 50, key is test_row_0/C:col10/1732724589814/Put/seqid=0 2024-11-27T16:23:11,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742392_1568 (size=12301) 2024-11-27T16:23:11,006 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/a0052fb15f9c469fa44cf1350f650d12 2024-11-27T16:23:11,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/2f4b913bddc74ea39c7b610cd249b1c5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2f4b913bddc74ea39c7b610cd249b1c5 2024-11-27T16:23:11,026 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2f4b913bddc74ea39c7b610cd249b1c5, entries=150, sequenceid=325, filesize=12.0 K 2024-11-27T16:23:11,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 
{event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/1c37a4a5a94a416fa4baefdba30ad0cb as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c37a4a5a94a416fa4baefdba30ad0cb 2024-11-27T16:23:11,031 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c37a4a5a94a416fa4baefdba30ad0cb, entries=150, sequenceid=325, filesize=12.0 K 2024-11-27T16:23:11,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/a0052fb15f9c469fa44cf1350f650d12 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a0052fb15f9c469fa44cf1350f650d12 2024-11-27T16:23:11,038 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a0052fb15f9c469fa44cf1350f650d12, entries=150, sequenceid=325, filesize=12.0 K 2024-11-27T16:23:11,041 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 00f170535dc2739662302d98f22dc172 in 607ms, sequenceid=325, compaction requested=false 2024-11-27T16:23:11,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:11,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:11,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-27T16:23:11,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-27T16:23:11,043 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-27T16:23:11,043 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 918 msec 2024-11-27T16:23:11,044 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 922 msec 2024-11-27T16:23:11,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T16:23:11,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:11,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:11,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:11,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:11,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:11,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:11,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:11,202 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/fc8d06d6fd24444ea2dbcd806a315618 is 50, key is test_row_0/A:col10/1732724590552/Put/seqid=0 2024-11-27T16:23:11,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-27T16:23:11,229 INFO [Thread-2216 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-27T16:23:11,231 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:11,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-27T16:23:11,233 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
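[editor's note] The FLUSH operations above (procId 134 completing, procId 136 being stored as a new FlushTableProcedure after "Client=jenkins//172.17.0.2 flush TestAcidGuarantees") are the server side of an administrative flush request. A minimal, hypothetical client-side equivalent using the public Admin API is sketched below; connection settings are assumed, not taken from this run.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush all regions of the table; on the server this
            // appears as a FlushTableProcedure with one FlushRegionProcedure subprocedure per region.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```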
2024-11-27T16:23:11,233 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:11,234 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:11,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-27T16:23:11,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742393_1569 (size=14741) 2024-11-27T16:23:11,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/fc8d06d6fd24444ea2dbcd806a315618 2024-11-27T16:23:11,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/6f8309f8431e4f1497e30fc797a9572e is 50, key is test_row_0/B:col10/1732724590552/Put/seqid=0 2024-11-27T16:23:11,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724651247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724651248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724651248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724651248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724651266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742394_1570 (size=12301) 2024-11-27T16:23:11,316 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/6f8309f8431e4f1497e30fc797a9572e 2024-11-27T16:23:11,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-27T16:23:11,342 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/f0741e447a0f4104b392c62fe4ced00c is 50, key is test_row_0/C:col10/1732724590552/Put/seqid=0 2024-11-27T16:23:11,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724651369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724651372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724651372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724651372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742395_1571 (size=12301) 2024-11-27T16:23:11,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724651380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,385 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,394 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-27T16:23:11,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:11,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:11,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:11,394 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:11,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:11,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:11,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-27T16:23:11,559 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-27T16:23:11,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:11,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:11,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:11,560 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:11,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:11,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:11,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724651573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724651575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724651575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724651575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724651588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,712 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-27T16:23:11,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:11,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:11,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:11,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:11,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:11,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:11,775 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/f0741e447a0f4104b392c62fe4ced00c 2024-11-27T16:23:11,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/fc8d06d6fd24444ea2dbcd806a315618 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/fc8d06d6fd24444ea2dbcd806a315618 2024-11-27T16:23:11,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/fc8d06d6fd24444ea2dbcd806a315618, entries=200, sequenceid=351, filesize=14.4 K 2024-11-27T16:23:11,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/6f8309f8431e4f1497e30fc797a9572e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6f8309f8431e4f1497e30fc797a9572e 2024-11-27T16:23:11,792 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6f8309f8431e4f1497e30fc797a9572e, entries=150, sequenceid=351, filesize=12.0 K 2024-11-27T16:23:11,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/f0741e447a0f4104b392c62fe4ced00c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f0741e447a0f4104b392c62fe4ced00c 2024-11-27T16:23:11,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f0741e447a0f4104b392c62fe4ced00c, entries=150, sequenceid=351, filesize=12.0 K 2024-11-27T16:23:11,801 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 00f170535dc2739662302d98f22dc172 in 617ms, sequenceid=351, compaction requested=true 2024-11-27T16:23:11,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:11,801 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:11,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:11,802 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40059 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:11,802 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files) 2024-11-27T16:23:11,802 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:11,802 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2091a4c80c6b4793acb1641a1364d922, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2f4b913bddc74ea39c7b610cd249b1c5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/fc8d06d6fd24444ea2dbcd806a315618] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=39.1 K 2024-11-27T16:23:11,803 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2091a4c80c6b4793acb1641a1364d922, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1732724589666 2024-11-27T16:23:11,803 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f4b913bddc74ea39c7b610cd249b1c5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732724589807 2024-11-27T16:23:11,804 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc8d06d6fd24444ea2dbcd806a315618, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732724590538 2024-11-27T16:23:11,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:11,807 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:11,809 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:11,809 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction (all files) 2024-11-27T16:23:11,810 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:11,810 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/9aa1588197e347659688112ad43f5d17, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c37a4a5a94a416fa4baefdba30ad0cb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6f8309f8431e4f1497e30fc797a9572e] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=36.7 K 2024-11-27T16:23:11,810 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 9aa1588197e347659688112ad43f5d17, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1732724589666 2024-11-27T16:23:11,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:11,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:11,812 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c37a4a5a94a416fa4baefdba30ad0cb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732724589807 2024-11-27T16:23:11,813 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f8309f8431e4f1497e30fc797a9572e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732724590538 2024-11-27T16:23:11,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:11,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:11,826 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#486 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:11,826 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/8e7aab86d4b048adb6adf190956faadf is 50, key is test_row_0/A:col10/1732724590552/Put/seqid=0 2024-11-27T16:23:11,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-27T16:23:11,849 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#487 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:11,850 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/29b65fdf90dd47379046befca53e980d is 50, key is test_row_0/B:col10/1732724590552/Put/seqid=0 2024-11-27T16:23:11,866 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-27T16:23:11,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
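Editor's note: the "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries above come from ExploringCompactionPolicy, which only accepts a candidate set when every file is no larger than the compaction ratio times the combined size of the other files in the set. The following is a minimal standalone sketch of that ratio test, not the HBase source; the 1.2 ratio is an assumption taken from the stock hbase.hstore.compaction.ratio default, and file sizes are plain longs rather than store-file handles.

import java.util.List;

// Hedged sketch of the "in ratio" check used when exploring candidate file sets.
// Illustrative only; the real policy also walks start/size permutations of the file list.
public class RatioCheckSketch {

    /** Returns true if every file is <= ratio * (total size of the other files). */
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
            return true; // a single file is trivially in ratio
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes roughly matching the three A-store files selected in the log (bytes).
        List<Long> candidate = List.of(13_000L, 12_300L, 14_700L);
        System.out.println(filesInRatio(candidate, 1.2)); // prints true
    }
}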
2024-11-27T16:23:11,867 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-27T16:23:11,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:11,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:11,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:11,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:11,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:11,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:11,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:11,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
as already flushing 2024-11-27T16:23:11,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742396_1572 (size=13119) 2024-11-27T16:23:11,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/32ec75c3e0974858abf81a3a714c1548 is 50, key is test_row_0/A:col10/1732724591221/Put/seqid=0 2024-11-27T16:23:11,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742398_1574 (size=12301) 2024-11-27T16:23:11,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742397_1573 (size=13119) 2024-11-27T16:23:11,916 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/32ec75c3e0974858abf81a3a714c1548 2024-11-27T16:23:11,922 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/29b65fdf90dd47379046befca53e980d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/29b65fdf90dd47379046befca53e980d 2024-11-27T16:23:11,932 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into 29b65fdf90dd47379046befca53e980d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
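Editor's note: the "Committing .../.tmp/B/... as .../B/..." entry above reflects the usual pattern of writing a new HFile into the region's .tmp directory and moving it into the column-family directory only once it is complete. Below is a simplified sketch of that commit step using the plain Hadoop FileSystem API; the paths are placeholders, and the real HRegionFileSystem additionally validates the file and updates the store's file list, which this sketch does not attempt.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hedged sketch of the ".tmp then move into the store directory" commit pattern
// seen in the HRegionFileSystem(442) log entries. Paths are illustrative placeholders.
public class TmpCommitSketch {

    /** Moves a finished HFile from the region's .tmp dir into the family dir. */
    static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
        Path dest = new Path(familyDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dest)) {
            throw new IOException("Failed to commit " + tmpFile + " to " + dest);
        }
        return dest;
    }

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf); // assumes fs.defaultFS points at the test filesystem
        Path tmp = new Path("/data/default/TestAcidGuarantees/REGION/.tmp/B/HFILE");
        Path familyDir = new Path("/data/default/TestAcidGuarantees/REGION/B");
        System.out.println("Committed to " + commitStoreFile(fs, tmp, familyDir));
    }
}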
2024-11-27T16:23:11,932 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:11,932 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=13, startTime=1732724591807; duration=0sec 2024-11-27T16:23:11,932 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:11,932 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:11,933 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:11,934 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:11,934 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/C is initiating minor compaction (all files) 2024-11-27T16:23:11,934 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/C in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:11,934 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/325e959dab6a410d824f264134169cf5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a0052fb15f9c469fa44cf1350f650d12, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f0741e447a0f4104b392c62fe4ced00c] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=36.7 K 2024-11-27T16:23:11,934 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 325e959dab6a410d824f264134169cf5, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1732724589666 2024-11-27T16:23:11,935 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a0052fb15f9c469fa44cf1350f650d12, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732724589807 2024-11-27T16:23:11,935 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f0741e447a0f4104b392c62fe4ced00c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732724590538 2024-11-27T16:23:11,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/af48d16ee85e4a52bfa36f65057b74e7 is 50, key is test_row_0/B:col10/1732724591221/Put/seqid=0 2024-11-27T16:23:11,945 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#C#compaction#490 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:11,946 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/6699b72357a945aca96020e22129b03e is 50, key is test_row_0/C:col10/1732724590552/Put/seqid=0 2024-11-27T16:23:11,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742400_1576 (size=13119) 2024-11-27T16:23:11,961 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/6699b72357a945aca96020e22129b03e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/6699b72357a945aca96020e22129b03e 2024-11-27T16:23:11,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724651950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724651950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724651952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724651953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,968 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/C of 00f170535dc2739662302d98f22dc172 into 6699b72357a945aca96020e22129b03e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:11,968 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:11,968 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/C, priority=13, startTime=1732724591812; duration=0sec 2024-11-27T16:23:11,968 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:11,968 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:11,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:11,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724651964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:11,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742399_1575 (size=12301) 2024-11-27T16:23:11,989 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/af48d16ee85e4a52bfa36f65057b74e7 2024-11-27T16:23:11,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/a80c895dc24f408080ccec613b2c533a is 50, key is test_row_0/C:col10/1732724591221/Put/seqid=0 2024-11-27T16:23:12,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742401_1577 (size=12301) 2024-11-27T16:23:12,041 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/a80c895dc24f408080ccec613b2c533a 2024-11-27T16:23:12,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/32ec75c3e0974858abf81a3a714c1548 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/32ec75c3e0974858abf81a3a714c1548 2024-11-27T16:23:12,052 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/32ec75c3e0974858abf81a3a714c1548, entries=150, sequenceid=364, filesize=12.0 K 2024-11-27T16:23:12,054 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/af48d16ee85e4a52bfa36f65057b74e7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/af48d16ee85e4a52bfa36f65057b74e7 2024-11-27T16:23:12,062 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/af48d16ee85e4a52bfa36f65057b74e7, entries=150, sequenceid=364, filesize=12.0 K 2024-11-27T16:23:12,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/a80c895dc24f408080ccec613b2c533a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a80c895dc24f408080ccec613b2c533a 2024-11-27T16:23:12,069 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a80c895dc24f408080ccec613b2c533a, entries=150, sequenceid=364, filesize=12.0 K 2024-11-27T16:23:12,070 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 00f170535dc2739662302d98f22dc172 in 203ms, sequenceid=364, compaction requested=false 2024-11-27T16:23:12,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:12,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
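Editor's note: the "Over memstore limit=512.0 K" RegionTooBusyException entries above and the flush that just finished are two sides of the same backpressure mechanism; once a region's memstore grows past the flush size times the block multiplier, new mutations are rejected until a flush brings it back down. The sketch below shows the two standard configuration keys involved; the values are illustrative ones that reproduce a 512 K blocking limit like the one in the log, not necessarily what TestAcidGuarantees actually configures.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch of the settings behind the "Over memstore limit" backpressure.
// Illustrative values only; the stock defaults are a 128 MB flush size and multiplier 4.
public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at 128 K (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x that

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // 128 K * 4 = 512 K, matching the limit reported in the RegionTooBusyException above.
        System.out.println("Blocking memstore limit: " + blockingLimit + " bytes");
    }
}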
2024-11-27T16:23:12,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-27T16:23:12,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-27T16:23:12,072 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T16:23:12,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:12,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:12,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:12,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:12,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:12,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:12,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:12,073 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-27T16:23:12,073 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 837 msec 2024-11-27T16:23:12,078 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/2f6313c4596c4b4a987e88f797179aa5 is 50, key is test_row_0/A:col10/1732724591947/Put/seqid=0 2024-11-27T16:23:12,080 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 843 msec 2024-11-27T16:23:12,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724652088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724652089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724652089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724652089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724652102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742402_1578 (size=14741) 2024-11-27T16:23:12,121 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/2f6313c4596c4b4a987e88f797179aa5 2024-11-27T16:23:12,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/37fffaf3fd224b3db62622c1c714b61a is 50, key is test_row_0/B:col10/1732724591947/Put/seqid=0 2024-11-27T16:23:12,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742403_1579 (size=12301) 2024-11-27T16:23:12,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724652203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724652203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724652204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724652204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724652219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,303 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/8e7aab86d4b048adb6adf190956faadf as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/8e7aab86d4b048adb6adf190956faadf 2024-11-27T16:23:12,309 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into 8e7aab86d4b048adb6adf190956faadf(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
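Editor's note: from the client side, the repeated RegionTooBusyException entries above surface as retriable write failures; the HBase client normally retries them internally with backoff and only fails the call once its retry budget is exhausted. The sketch below makes that backoff explicit for illustration, using the table, row, family, and qualifier visible in the log; the retry count and sleep are assumptions, not what the test's writer threads actually do.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch of a writer backing off when the region reports memstore pressure.
// The client already retries RegionTooBusyException on its own; this loop is illustrative.
public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            int attempts = 0;
            while (true) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (IOException e) { // e.g. a wrapped RegionTooBusyException
                    if (++attempts >= 5) {
                        throw e; // give up after a few attempts
                    }
                    Thread.sleep(200L * attempts); // crude linear backoff
                }
            }
        }
    }
}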
2024-11-27T16:23:12,309 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:12,309 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=13, startTime=1732724591801; duration=0sec 2024-11-27T16:23:12,310 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:12,310 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:12,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-27T16:23:12,346 INFO [Thread-2216 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-27T16:23:12,348 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:12,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-27T16:23:12,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-27T16:23:12,350 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:12,351 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:12,351 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:12,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724652418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724652418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724652417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724652420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724652432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-27T16:23:12,504 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-27T16:23:12,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:12,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:12,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:12,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:12,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:12,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:12,586 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/37fffaf3fd224b3db62622c1c714b61a 2024-11-27T16:23:12,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/7437d1c3b25b4f3c968a8595076467e4 is 50, key is test_row_0/C:col10/1732724591947/Put/seqid=0 2024-11-27T16:23:12,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-27T16:23:12,659 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-27T16:23:12,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:12,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
as already flushing 2024-11-27T16:23:12,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:12,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:12,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:12,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:12,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742404_1580 (size=12301) 2024-11-27T16:23:12,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724652730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724652731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,740 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724652731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,740 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724652732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:12,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724652741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,815 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-27T16:23:12,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:12,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:12,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:12,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:12,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:12,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-27T16:23:12,968 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:12,971 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-27T16:23:12,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:12,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:12,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:12,971 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:12,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:12,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:13,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/7437d1c3b25b4f3c968a8595076467e4 2024-11-27T16:23:13,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/2f6313c4596c4b4a987e88f797179aa5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2f6313c4596c4b4a987e88f797179aa5 2024-11-27T16:23:13,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2f6313c4596c4b4a987e88f797179aa5, entries=200, sequenceid=390, filesize=14.4 K 2024-11-27T16:23:13,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/37fffaf3fd224b3db62622c1c714b61a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/37fffaf3fd224b3db62622c1c714b61a 2024-11-27T16:23:13,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/37fffaf3fd224b3db62622c1c714b61a, entries=150, 
sequenceid=390, filesize=12.0 K 2024-11-27T16:23:13,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/7437d1c3b25b4f3c968a8595076467e4 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7437d1c3b25b4f3c968a8595076467e4 2024-11-27T16:23:13,087 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7437d1c3b25b4f3c968a8595076467e4, entries=150, sequenceid=390, filesize=12.0 K 2024-11-27T16:23:13,088 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 00f170535dc2739662302d98f22dc172 in 1016ms, sequenceid=390, compaction requested=true 2024-11-27T16:23:13,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:13,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:13,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:13,088 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:13,088 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:13,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:13,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:13,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:13,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:13,090 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40161 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:13,090 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files) 2024-11-27T16:23:13,090 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in 
TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:13,090 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/8e7aab86d4b048adb6adf190956faadf, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/32ec75c3e0974858abf81a3a714c1548, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2f6313c4596c4b4a987e88f797179aa5] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=39.2 K 2024-11-27T16:23:13,090 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:13,091 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction (all files) 2024-11-27T16:23:13,091 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:13,091 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/29b65fdf90dd47379046befca53e980d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/af48d16ee85e4a52bfa36f65057b74e7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/37fffaf3fd224b3db62622c1c714b61a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=36.8 K 2024-11-27T16:23:13,091 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e7aab86d4b048adb6adf190956faadf, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732724590538 2024-11-27T16:23:13,091 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 29b65fdf90dd47379046befca53e980d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732724590538 2024-11-27T16:23:13,091 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32ec75c3e0974858abf81a3a714c1548, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732724591199 2024-11-27T16:23:13,092 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f6313c4596c4b4a987e88f797179aa5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732724591947 
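
[Editor's note] The "Exploring compaction algorithm has selected 3 files of size 40161 ... with 1 in ratio" entries above come from HBase's ratio-based compaction selection. As a rough, self-contained illustration of the "files in ratio" test that this kind of policy applies (not the actual HBase source; the per-file byte split of the 40161-byte total and the 1.2 ratio are assumptions), a candidate set qualifies only if no file is larger than the combined size of the other candidates times the ratio:

import java.util.List;

public class RatioCheckSketch {
  // Simplified "files in ratio" test used by ratio-based compaction policies:
  // every file must be no larger than `ratio` times the sum of the other files.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true;
    }
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Three hypothetical file sizes summing to the 40161 bytes reported for store A above;
    // 1.2 is the usual default compaction ratio, assumed here.
    System.out.println(filesInRatio(List.of(13107L, 12288L, 14766L), 1.2)); // true -> selectable
  }
}
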
2024-11-27T16:23:13,092 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting af48d16ee85e4a52bfa36f65057b74e7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732724591199 2024-11-27T16:23:13,094 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 37fffaf3fd224b3db62622c1c714b61a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732724591947 2024-11-27T16:23:13,106 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#495 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:13,107 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/f21b191690054776ba08244a3f0429b3 is 50, key is test_row_0/A:col10/1732724591947/Put/seqid=0 2024-11-27T16:23:13,110 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#496 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:13,110 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/1147bbdb737d4a9e8492f5bde6d9cb23 is 50, key is test_row_0/B:col10/1732724591947/Put/seqid=0 2024-11-27T16:23:13,123 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-27T16:23:13,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
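
[Editor's note] The throttle.PressureAwareThroughputController entries report compaction write throughput against a total limit (50.00 MB/second here, with 0 ms slept because the writes stayed under it). The sketch below is a minimal rate limiter in the same spirit, not the real controller: it sleeps just long enough that bytes written never outpace elapsed time times the limit. All class and method names are illustrative.

public class ThroughputThrottleSketch {
  private final double maxBytesPerSecond;
  private long startNanos;
  private long bytesWritten;

  ThroughputThrottleSketch(double maxBytesPerSecond) {
    this.maxBytesPerSecond = maxBytesPerSecond;
  }

  void start() {
    startNanos = System.nanoTime();
    bytesWritten = 0;
  }

  // Called after each chunk of output; returns the number of milliseconds slept.
  long control(long deltaBytes) throws InterruptedException {
    bytesWritten += deltaBytes;
    double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
    double minSecondsForBytes = bytesWritten / maxBytesPerSecond;
    long sleepMillis = (long) ((minSecondsForBytes - elapsedSeconds) * 1000);
    if (sleepMillis > 0) {
      Thread.sleep(sleepMillis); // fall back under the configured limit
      return sleepMillis;
    }
    return 0;
  }

  public static void main(String[] args) throws InterruptedException {
    // 50 MB/s, matching the total limit reported in the log above.
    ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50 * 1024 * 1024);
    throttle.start();
    for (int i = 0; i < 10; i++) {
      // pretend we just wrote a 64 KB block of compacted data
      throttle.control(64 * 1024);
    }
  }
}
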
2024-11-27T16:23:13,124 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:23:13,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:13,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:13,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:13,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:13,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:13,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:13,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/6a34adcb00a6486a92bc6b36938344fa is 50, key is test_row_0/A:col10/1732724592087/Put/seqid=0 2024-11-27T16:23:13,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742406_1582 (size=13221) 2024-11-27T16:23:13,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742405_1581 (size=13221) 2024-11-27T16:23:13,191 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/1147bbdb737d4a9e8492f5bde6d9cb23 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1147bbdb737d4a9e8492f5bde6d9cb23 2024-11-27T16:23:13,198 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/f21b191690054776ba08244a3f0429b3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f21b191690054776ba08244a3f0429b3 2024-11-27T16:23:13,199 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into 
1147bbdb737d4a9e8492f5bde6d9cb23(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:13,199 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:13,199 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=13, startTime=1732724593088; duration=0sec 2024-11-27T16:23:13,199 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:13,199 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:13,199 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:13,202 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:13,202 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/C is initiating minor compaction (all files) 2024-11-27T16:23:13,202 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/C in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:13,202 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/6699b72357a945aca96020e22129b03e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a80c895dc24f408080ccec613b2c533a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7437d1c3b25b4f3c968a8595076467e4] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=36.8 K 2024-11-27T16:23:13,202 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into f21b191690054776ba08244a3f0429b3(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
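
[Editor's note] The repeated RegionTooBusyException warnings throughout this section come from HRegion.checkResources(), which rejects writes once a region's memstore passes its blocking threshold (512.0 K in this run) until the in-progress flush, like the one completing above, brings it back down. In stock HBase that threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the small values below are assumptions chosen only to reproduce a 512 K limit and are not necessarily what this test configures.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Hypothetical settings: a 128 KB per-region flush size with the default
    // block multiplier of 4 yields the 512 KB blocking limit seen in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;

    // Writes to a region fail with RegionTooBusyException while its memstore
    // exceeds this limit, until a flush reduces it again.
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}
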
2024-11-27T16:23:13,202 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:13,203 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=13, startTime=1732724593088; duration=0sec 2024-11-27T16:23:13,203 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:13,203 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:13,203 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6699b72357a945aca96020e22129b03e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732724590538 2024-11-27T16:23:13,203 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a80c895dc24f408080ccec613b2c533a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732724591199 2024-11-27T16:23:13,204 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7437d1c3b25b4f3c968a8595076467e4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732724591947 2024-11-27T16:23:13,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742407_1583 (size=12301) 2024-11-27T16:23:13,209 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/6a34adcb00a6486a92bc6b36938344fa 2024-11-27T16:23:13,220 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#C#compaction#498 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:13,220 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/59c38ccfa5b74759a91333b880d512bd is 50, key is test_row_0/C:col10/1732724591947/Put/seqid=0 2024-11-27T16:23:13,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/36ed274f8c2a4ca89c816ebb93402fdb is 50, key is test_row_0/B:col10/1732724592087/Put/seqid=0 2024-11-27T16:23:13,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:13,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742408_1584 (size=13221) 2024-11-27T16:23:13,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742409_1585 (size=12301) 2024-11-27T16:23:13,266 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/36ed274f8c2a4ca89c816ebb93402fdb 2024-11-27T16:23:13,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/2a90cc06e1754d31b1d2ef5b9c499b1d is 50, key is test_row_0/C:col10/1732724592087/Put/seqid=0 2024-11-27T16:23:13,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724653303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724653305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724653306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724653308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724653318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742410_1586 (size=12301) 2024-11-27T16:23:13,346 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/2a90cc06e1754d31b1d2ef5b9c499b1d 2024-11-27T16:23:13,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/6a34adcb00a6486a92bc6b36938344fa as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6a34adcb00a6486a92bc6b36938344fa 2024-11-27T16:23:13,359 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6a34adcb00a6486a92bc6b36938344fa, entries=150, sequenceid=403, filesize=12.0 K 2024-11-27T16:23:13,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/36ed274f8c2a4ca89c816ebb93402fdb as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/36ed274f8c2a4ca89c816ebb93402fdb 2024-11-27T16:23:13,365 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/36ed274f8c2a4ca89c816ebb93402fdb, entries=150, sequenceid=403, filesize=12.0 K 2024-11-27T16:23:13,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/2a90cc06e1754d31b1d2ef5b9c499b1d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2a90cc06e1754d31b1d2ef5b9c499b1d 2024-11-27T16:23:13,379 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2a90cc06e1754d31b1d2ef5b9c499b1d, entries=150, sequenceid=403, filesize=12.0 K 2024-11-27T16:23:13,380 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 00f170535dc2739662302d98f22dc172 in 256ms, sequenceid=403, compaction requested=false 2024-11-27T16:23:13,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:13,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:13,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-27T16:23:13,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-27T16:23:13,385 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-27T16:23:13,386 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0300 sec 2024-11-27T16:23:13,387 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.0380 sec 2024-11-27T16:23:13,426 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:23:13,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:13,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:13,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:13,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:13,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:13,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:13,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:13,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/e2a2a529f4af4e01b9442c70cc5d47a3 is 50, key is test_row_0/A:col10/1732724593425/Put/seqid=0 2024-11-27T16:23:13,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724653443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724653443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-27T16:23:13,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724653444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724653453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,456 INFO [Thread-2216 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-27T16:23:13,457 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:13,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-27T16:23:13,459 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:13,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-27T16:23:13,459 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:13,460 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:13,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724653454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742411_1587 (size=17181) 2024-11-27T16:23:13,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/e2a2a529f4af4e01b9442c70cc5d47a3 2024-11-27T16:23:13,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/b93592fa81e24280b32a40afe7bcc0c1 is 50, key is test_row_0/B:col10/1732724593425/Put/seqid=0 2024-11-27T16:23:13,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-27T16:23:13,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724653555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724653556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724653557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724653557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724653565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742412_1588 (size=12301) 2024-11-27T16:23:13,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/b93592fa81e24280b32a40afe7bcc0c1 2024-11-27T16:23:13,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/43e05408466d42569a4e8051dee0e621 is 50, key is test_row_0/C:col10/1732724593425/Put/seqid=0 2024-11-27T16:23:13,611 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-27T16:23:13,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:13,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
as already flushing 2024-11-27T16:23:13,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:13,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:13,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:13,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:13,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742413_1589 (size=12301) 2024-11-27T16:23:13,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/43e05408466d42569a4e8051dee0e621 2024-11-27T16:23:13,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/e2a2a529f4af4e01b9442c70cc5d47a3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e2a2a529f4af4e01b9442c70cc5d47a3 2024-11-27T16:23:13,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e2a2a529f4af4e01b9442c70cc5d47a3, entries=250, sequenceid=431, filesize=16.8 K 2024-11-27T16:23:13,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/b93592fa81e24280b32a40afe7bcc0c1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/b93592fa81e24280b32a40afe7bcc0c1 
2024-11-27T16:23:13,647 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/b93592fa81e24280b32a40afe7bcc0c1, entries=150, sequenceid=431, filesize=12.0 K 2024-11-27T16:23:13,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/43e05408466d42569a4e8051dee0e621 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/43e05408466d42569a4e8051dee0e621 2024-11-27T16:23:13,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/43e05408466d42569a4e8051dee0e621, entries=150, sequenceid=431, filesize=12.0 K 2024-11-27T16:23:13,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 00f170535dc2739662302d98f22dc172 in 226ms, sequenceid=431, compaction requested=true 2024-11-27T16:23:13,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:13,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:13,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:13,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:13,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:23:13,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:13,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-27T16:23:13,653 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 3 compacting, 2 eligible, 16 blocking 2024-11-27T16:23:13,654 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-27T16:23:13,654 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 
2024-11-27T16:23:13,654 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. because compaction request was cancelled 2024-11-27T16:23:13,654 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:13,654 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:13,655 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42703 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:13,655 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files) 2024-11-27T16:23:13,655 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:13,655 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f21b191690054776ba08244a3f0429b3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6a34adcb00a6486a92bc6b36938344fa, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e2a2a529f4af4e01b9442c70cc5d47a3] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=41.7 K 2024-11-27T16:23:13,655 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f21b191690054776ba08244a3f0429b3, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732724591947 2024-11-27T16:23:13,656 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a34adcb00a6486a92bc6b36938344fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1732724592086 2024-11-27T16:23:13,657 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2a2a529f4af4e01b9442c70cc5d47a3, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732724593303 2024-11-27T16:23:13,669 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/59c38ccfa5b74759a91333b880d512bd as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/59c38ccfa5b74759a91333b880d512bd 2024-11-27T16:23:13,677 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/C of 00f170535dc2739662302d98f22dc172 into 59c38ccfa5b74759a91333b880d512bd(size=12.9 K), total size for store is 36.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:13,677 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:13,677 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/C, priority=13, startTime=1732724593089; duration=0sec 2024-11-27T16:23:13,677 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:13,677 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:13,677 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:13,686 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#504 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:13,687 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/333c5f8488d0457c9b04488b7ba6fe54 is 50, key is test_row_0/A:col10/1732724593425/Put/seqid=0 2024-11-27T16:23:13,693 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:13,693 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction (all files) 2024-11-27T16:23:13,693 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:13,693 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1147bbdb737d4a9e8492f5bde6d9cb23, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/36ed274f8c2a4ca89c816ebb93402fdb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/b93592fa81e24280b32a40afe7bcc0c1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=36.9 K 2024-11-27T16:23:13,694 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1147bbdb737d4a9e8492f5bde6d9cb23, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732724591947 2024-11-27T16:23:13,694 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 36ed274f8c2a4ca89c816ebb93402fdb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1732724592086 2024-11-27T16:23:13,694 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b93592fa81e24280b32a40afe7bcc0c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732724593303 2024-11-27T16:23:13,722 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#505 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:13,722 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/f04fea1417df4934a4f7c323065fb1d7 is 50, key is test_row_0/B:col10/1732724593425/Put/seqid=0 2024-11-27T16:23:13,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742414_1590 (size=13323) 2024-11-27T16:23:13,749 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/333c5f8488d0457c9b04488b7ba6fe54 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/333c5f8488d0457c9b04488b7ba6fe54 2024-11-27T16:23:13,754 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into 333c5f8488d0457c9b04488b7ba6fe54(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
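The compactions above are system-requested after flushes, but the same work can also be queued explicitly through the Admin API. A minimal sketch, assuming a reachable cluster and reusing the test table name from the log; everything else is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.compact(table);      // queue a minor compaction for each store of the table
          admin.majorCompact(table); // or rewrite all store files in one major compaction
        }
      }
    }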
2024-11-27T16:23:13,754 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:13,754 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=13, startTime=1732724593653; duration=0sec 2024-11-27T16:23:13,754 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:13,754 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:13,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-27T16:23:13,764 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-27T16:23:13,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:13,766 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:23:13,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:13,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:13,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:13,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:13,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:13,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:13,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:13,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:13,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/d19abd991f084a37bfbba9a71f90dbf1 is 50, key is test_row_0/A:col10/1732724593441/Put/seqid=0 2024-11-27T16:23:13,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742415_1591 (size=13323) 2024-11-27T16:23:13,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742416_1592 (size=12301) 2024-11-27T16:23:13,803 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=444 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/d19abd991f084a37bfbba9a71f90dbf1 2024-11-27T16:23:13,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/2190680471c3439baf8fc1e6064ef09b is 50, key is test_row_0/B:col10/1732724593441/Put/seqid=0 2024-11-27T16:23:13,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742417_1593 (size=12301) 2024-11-27T16:23:13,842 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=444 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/2190680471c3439baf8fc1e6064ef09b 2024-11-27T16:23:13,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/529e2e8e231d4e1b9abc39612abf5e42 is 50, key is test_row_0/C:col10/1732724593441/Put/seqid=0 2024-11-27T16:23:13,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724653865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724653866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724653867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724653869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:13,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724653869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:13,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742418_1594 (size=12301) 2024-11-27T16:23:13,920 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=444 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/529e2e8e231d4e1b9abc39612abf5e42 2024-11-27T16:23:13,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/d19abd991f084a37bfbba9a71f90dbf1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d19abd991f084a37bfbba9a71f90dbf1 2024-11-27T16:23:13,929 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d19abd991f084a37bfbba9a71f90dbf1, entries=150, sequenceid=444, filesize=12.0 K 2024-11-27T16:23:13,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/2190680471c3439baf8fc1e6064ef09b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2190680471c3439baf8fc1e6064ef09b 2024-11-27T16:23:13,934 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2190680471c3439baf8fc1e6064ef09b, entries=150, sequenceid=444, filesize=12.0 K 2024-11-27T16:23:13,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/529e2e8e231d4e1b9abc39612abf5e42 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/529e2e8e231d4e1b9abc39612abf5e42 2024-11-27T16:23:13,939 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/529e2e8e231d4e1b9abc39612abf5e42, entries=150, sequenceid=444, filesize=12.0 K 2024-11-27T16:23:13,940 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 00f170535dc2739662302d98f22dc172 in 174ms, sequenceid=444, compaction requested=true 2024-11-27T16:23:13,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:13,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
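The RegionTooBusyException warnings above come from HRegion.checkResources rejecting puts while the region's memstore is over its blocking limit (512 K here, which is consistent with a small per-region flush size multiplied by the memstore block multiplier, though the exact test configuration is not shown in this log). On the client side such writes eventually surface as an IOException and need to be retried once flushes drain the memstore. A hedged sketch of that retry loop; table, put, and retry counts are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class BusyRegionBackoffSketch {
      // Depending on client retry settings, the RegionTooBusyException logged on the
      // server may reach the caller wrapped in a retries-exhausted IOException rather
      // than directly, so this catches IOException broadly.
      static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        long backoffMs = 100;
        IOException last = null;
        for (int attempt = 0; attempt < 5; attempt++) {
          try {
            table.put(put);
            return;
          } catch (IOException e) {
            last = e;                // likely RegionTooBusyException, possibly wrapped
            Thread.sleep(backoffMs); // give the region time to flush and drain its memstore
            backoffMs *= 2;
          }
        }
        throw last;
      }
    }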
2024-11-27T16:23:13,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-27T16:23:13,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-27T16:23:13,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-27T16:23:13,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 481 msec 2024-11-27T16:23:13,944 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 485 msec 2024-11-27T16:23:13,988 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-27T16:23:13,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:13,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:13,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:13,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:13,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:13,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:13,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:14,006 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/a40b2bec98cb4c2b8a113862d051afef is 50, key is test_row_0/A:col10/1732724593988/Put/seqid=0 2024-11-27T16:23:14,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724654000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724654002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724654012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724654020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724654030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742419_1595 (size=17181) 2024-11-27T16:23:14,051 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/a40b2bec98cb4c2b8a113862d051afef 2024-11-27T16:23:14,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-27T16:23:14,062 INFO [Thread-2216 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-27T16:23:14,063 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:14,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-27T16:23:14,065 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:14,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-27T16:23:14,066 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:14,066 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:14,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/6c01b22228e443d590540dd56006ce2a is 50, key is test_row_0/B:col10/1732724593988/Put/seqid=0 2024-11-27T16:23:14,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742420_1596 (size=12301) 2024-11-27T16:23:14,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724654121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724654121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724654130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724654137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724654149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-27T16:23:14,187 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/f04fea1417df4934a4f7c323065fb1d7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f04fea1417df4934a4f7c323065fb1d7 2024-11-27T16:23:14,194 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into f04fea1417df4934a4f7c323065fb1d7(size=13.0 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:14,194 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:14,194 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=13, startTime=1732724593653; duration=0sec 2024-11-27T16:23:14,194 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:14,194 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:14,218 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:14,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:14,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:14,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:14,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:14,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
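The flush procedures above (pid=140 and pid=142/143) originate from client Admin flush calls, as the master entries "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and the client-side "Operation: FLUSH ... procId: 140 completed" show. Because the region is still flushing when pid=143 reaches the regionserver, the remote callable fails with the IOException above and the failure is reported back to the master. A minimal sketch of the client-side call, assuming an Admin handle obtained from an existing Connection:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class FlushTableSketch {
      // Mirrors the call behind the "flush TestAcidGuarantees" master entries:
      // Admin.flush submits a flush for the table and waits for the resulting
      // procedure to complete.
      static void flushTestTable(Admin admin) throws Exception {
        admin.flush(TableName.valueOf("TestAcidGuarantees"));
      }
    }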
2024-11-27T16:23:14,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:14,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724654331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724654335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724654343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724654350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-27T16:23:14,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724654361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,371 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,372 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:14,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:14,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:14,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:14,372 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:14,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:14,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:14,507 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/6c01b22228e443d590540dd56006ce2a 2024-11-27T16:23:14,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/2950bb6aa7d141dd9dd46481ad5ab175 is 50, key is test_row_0/C:col10/1732724593988/Put/seqid=0 2024-11-27T16:23:14,526 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,526 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:14,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:14,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:14,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:14,526 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:14,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:14,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:14,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742421_1597 (size=12301) 2024-11-27T16:23:14,562 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/2950bb6aa7d141dd9dd46481ad5ab175 2024-11-27T16:23:14,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/a40b2bec98cb4c2b8a113862d051afef as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a40b2bec98cb4c2b8a113862d051afef 2024-11-27T16:23:14,574 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a40b2bec98cb4c2b8a113862d051afef, entries=250, sequenceid=471, filesize=16.8 K 2024-11-27T16:23:14,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/6c01b22228e443d590540dd56006ce2a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6c01b22228e443d590540dd56006ce2a 2024-11-27T16:23:14,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6c01b22228e443d590540dd56006ce2a, entries=150, sequenceid=471, filesize=12.0 K 2024-11-27T16:23:14,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/2950bb6aa7d141dd9dd46481ad5ab175 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2950bb6aa7d141dd9dd46481ad5ab175 2024-11-27T16:23:14,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2950bb6aa7d141dd9dd46481ad5ab175, entries=150, sequenceid=471, filesize=12.0 K 2024-11-27T16:23:14,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 00f170535dc2739662302d98f22dc172 in 604ms, sequenceid=471, compaction requested=true 2024-11-27T16:23:14,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:14,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:14,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:14,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:14,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:23:14,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:14,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-27T16:23:14,593 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T16:23:14,593 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:14,595 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42805 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:14,595 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files) 2024-11-27T16:23:14,595 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:14,595 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/333c5f8488d0457c9b04488b7ba6fe54, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d19abd991f084a37bfbba9a71f90dbf1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a40b2bec98cb4c2b8a113862d051afef] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=41.8 K 2024-11-27T16:23:14,596 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62425 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T16:23:14,596 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/C is initiating minor compaction (all files) 2024-11-27T16:23:14,596 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/C in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:14,596 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/59c38ccfa5b74759a91333b880d512bd, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2a90cc06e1754d31b1d2ef5b9c499b1d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/43e05408466d42569a4e8051dee0e621, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/529e2e8e231d4e1b9abc39612abf5e42, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2950bb6aa7d141dd9dd46481ad5ab175] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=61.0 K 2024-11-27T16:23:14,597 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 333c5f8488d0457c9b04488b7ba6fe54, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732724593303 2024-11-27T16:23:14,597 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59c38ccfa5b74759a91333b880d512bd, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732724591947 2024-11-27T16:23:14,598 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d19abd991f084a37bfbba9a71f90dbf1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=444, 
earliestPutTs=1732724593438 2024-11-27T16:23:14,598 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a90cc06e1754d31b1d2ef5b9c499b1d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1732724592086 2024-11-27T16:23:14,598 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a40b2bec98cb4c2b8a113862d051afef, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1732724593865 2024-11-27T16:23:14,598 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43e05408466d42569a4e8051dee0e621, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732724593303 2024-11-27T16:23:14,599 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 529e2e8e231d4e1b9abc39612abf5e42, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=444, earliestPutTs=1732724593438 2024-11-27T16:23:14,599 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2950bb6aa7d141dd9dd46481ad5ab175, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1732724593865 2024-11-27T16:23:14,615 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#512 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:14,616 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/bb58fb4a14b4471599bbe79c21c6dbab is 50, key is test_row_0/A:col10/1732724593988/Put/seqid=0 2024-11-27T16:23:14,626 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#C#compaction#513 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:14,627 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/7186d5fde2244e1b9816b82de79900ce is 50, key is test_row_0/C:col10/1732724593988/Put/seqid=0 2024-11-27T16:23:14,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:14,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:23:14,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:14,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:14,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:14,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:14,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:14,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:14,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-27T16:23:14,679 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,679 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:14,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:14,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:14,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:14,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:14,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:14,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:14,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742422_1598 (size=13425) 2024-11-27T16:23:14,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/9de8c476983248efbb99bc0edfb874a8 is 50, key is test_row_0/A:col10/1732724594645/Put/seqid=0 2024-11-27T16:23:14,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742423_1599 (size=13391) 2024-11-27T16:23:14,729 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/7186d5fde2244e1b9816b82de79900ce as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7186d5fde2244e1b9816b82de79900ce 2024-11-27T16:23:14,734 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 00f170535dc2739662302d98f22dc172/C of 00f170535dc2739662302d98f22dc172 into 7186d5fde2244e1b9816b82de79900ce(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:23:14,734 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:14,734 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/C, priority=11, startTime=1732724594593; duration=0sec 2024-11-27T16:23:14,734 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:14,734 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:14,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,734 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:14,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724654719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724654721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724654722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724654723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724654723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,738 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:14,738 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction (all files) 2024-11-27T16:23:14,738 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:14,738 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f04fea1417df4934a4f7c323065fb1d7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2190680471c3439baf8fc1e6064ef09b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6c01b22228e443d590540dd56006ce2a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=37.0 K 2024-11-27T16:23:14,738 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f04fea1417df4934a4f7c323065fb1d7, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732724593303 2024-11-27T16:23:14,739 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2190680471c3439baf8fc1e6064ef09b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=444, earliestPutTs=1732724593438 2024-11-27T16:23:14,739 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c01b22228e443d590540dd56006ce2a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1732724593865 2024-11-27T16:23:14,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742424_1600 (size=14737) 2024-11-27T16:23:14,758 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=483 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/9de8c476983248efbb99bc0edfb874a8 2024-11-27T16:23:14,765 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#515 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:14,765 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/e79c9bd72186448aa7e33cb734901486 is 50, key is test_row_0/B:col10/1732724593988/Put/seqid=0 2024-11-27T16:23:14,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/16e239afdccc444996582b6f011210ac is 50, key is test_row_0/B:col10/1732724594645/Put/seqid=0 2024-11-27T16:23:14,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742425_1601 (size=13425) 2024-11-27T16:23:14,827 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/e79c9bd72186448aa7e33cb734901486 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/e79c9bd72186448aa7e33cb734901486 2024-11-27T16:23:14,831 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:14,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:14,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:14,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:14,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:14,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:14,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:14,835 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into e79c9bd72186448aa7e33cb734901486(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:14,835 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:14,835 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=13, startTime=1732724594593; duration=0sec 2024-11-27T16:23:14,835 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:14,835 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:14,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724654836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724654837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724654838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724654836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:14,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724654840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742426_1602 (size=9857) 2024-11-27T16:23:14,985 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:14,986 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:14,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:14,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:14,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:14,986 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:14,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:14,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:15,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724655052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724655052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724655052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724655053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724655053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,104 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/bb58fb4a14b4471599bbe79c21c6dbab as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/bb58fb4a14b4471599bbe79c21c6dbab 2024-11-27T16:23:15,109 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into bb58fb4a14b4471599bbe79c21c6dbab(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:15,109 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:15,109 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=13, startTime=1732724594593; duration=0sec 2024-11-27T16:23:15,110 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:15,110 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:15,139 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:15,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:15,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:15,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:15,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:15,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:15,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:15,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-27T16:23:15,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=483 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/16e239afdccc444996582b6f011210ac 2024-11-27T16:23:15,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/14dcbb8ddfe748dd9543842772e16fd1 is 50, key is test_row_0/C:col10/1732724594645/Put/seqid=0 2024-11-27T16:23:15,292 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,292 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:15,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:15,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:15,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:15,293 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:15,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:15,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:15,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742427_1603 (size=9857) 2024-11-27T16:23:15,301 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=483 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/14dcbb8ddfe748dd9543842772e16fd1 2024-11-27T16:23:15,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/9de8c476983248efbb99bc0edfb874a8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/9de8c476983248efbb99bc0edfb874a8 2024-11-27T16:23:15,327 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/9de8c476983248efbb99bc0edfb874a8, entries=200, sequenceid=483, filesize=14.4 K 2024-11-27T16:23:15,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/16e239afdccc444996582b6f011210ac as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/16e239afdccc444996582b6f011210ac 2024-11-27T16:23:15,336 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/16e239afdccc444996582b6f011210ac, entries=100, sequenceid=483, filesize=9.6 K 2024-11-27T16:23:15,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/14dcbb8ddfe748dd9543842772e16fd1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/14dcbb8ddfe748dd9543842772e16fd1 2024-11-27T16:23:15,341 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/14dcbb8ddfe748dd9543842772e16fd1, entries=100, sequenceid=483, filesize=9.6 K 2024-11-27T16:23:15,342 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 00f170535dc2739662302d98f22dc172 in 693ms, sequenceid=483, compaction requested=false 2024-11-27T16:23:15,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:15,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:15,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-27T16:23:15,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:15,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:15,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:15,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:15,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:15,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:15,385 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/1631abbcdbb84e4da6007f7d96113987 is 50, key is test_row_0/A:col10/1732724595378/Put/seqid=0 2024-11-27T16:23:15,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724655381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724655382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724655383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724655386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724655388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742428_1604 (size=17181) 2024-11-27T16:23:15,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=515 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/1631abbcdbb84e4da6007f7d96113987 2024-11-27T16:23:15,445 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:15,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:15,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:15,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:15,446 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:15,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:15,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:15,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/568a3511e05f4e27b3418af17f810e37 is 50, key is test_row_0/B:col10/1732724595378/Put/seqid=0 2024-11-27T16:23:15,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724655489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724655489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724655491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742429_1605 (size=12301) 2024-11-27T16:23:15,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=515 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/568a3511e05f4e27b3418af17f810e37 2024-11-27T16:23:15,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724655492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724655495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,510 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/a44ec08f2bd541e396deef15fc6925bc is 50, key is test_row_0/C:col10/1732724595378/Put/seqid=0 2024-11-27T16:23:15,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742430_1606 (size=12301) 2024-11-27T16:23:15,600 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:15,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:15,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:15,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:15,601 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:15,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:15,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:15,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724655694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724655695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724655698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724655701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:15,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724655708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,754 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:15,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:15,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:15,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:15,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:15,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:15,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:15,907 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:15,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:15,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:15,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:15,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:15,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:15,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:15,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:15,962 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=515 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/a44ec08f2bd541e396deef15fc6925bc 2024-11-27T16:23:15,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/1631abbcdbb84e4da6007f7d96113987 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/1631abbcdbb84e4da6007f7d96113987 2024-11-27T16:23:15,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/1631abbcdbb84e4da6007f7d96113987, entries=250, sequenceid=515, filesize=16.8 K 2024-11-27T16:23:15,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/568a3511e05f4e27b3418af17f810e37 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/568a3511e05f4e27b3418af17f810e37 2024-11-27T16:23:15,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/568a3511e05f4e27b3418af17f810e37, entries=150, sequenceid=515, filesize=12.0 K 2024-11-27T16:23:15,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/a44ec08f2bd541e396deef15fc6925bc as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a44ec08f2bd541e396deef15fc6925bc 2024-11-27T16:23:15,990 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a44ec08f2bd541e396deef15fc6925bc, entries=150, sequenceid=515, filesize=12.0 K 2024-11-27T16:23:15,991 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 00f170535dc2739662302d98f22dc172 in 611ms, sequenceid=515, compaction requested=true 2024-11-27T16:23:15,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:15,991 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:15,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-27T16:23:15,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:15,992 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:15,992 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:15,992 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files) 2024-11-27T16:23:15,992 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:15,992 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/bb58fb4a14b4471599bbe79c21c6dbab, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/9de8c476983248efbb99bc0edfb874a8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/1631abbcdbb84e4da6007f7d96113987] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=44.3 K 2024-11-27T16:23:15,993 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb58fb4a14b4471599bbe79c21c6dbab, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1732724593865 2024-11-27T16:23:15,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:15,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:15,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:15,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:15,994 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35583 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:15,994 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction 
(all files) 2024-11-27T16:23:15,994 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:15,994 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/e79c9bd72186448aa7e33cb734901486, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/16e239afdccc444996582b6f011210ac, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/568a3511e05f4e27b3418af17f810e37] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=34.7 K 2024-11-27T16:23:15,994 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9de8c476983248efbb99bc0edfb874a8, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=483, earliestPutTs=1732724594011 2024-11-27T16:23:15,994 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting e79c9bd72186448aa7e33cb734901486, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1732724593865 2024-11-27T16:23:15,995 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1631abbcdbb84e4da6007f7d96113987, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732724594721 2024-11-27T16:23:15,995 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 16e239afdccc444996582b6f011210ac, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=483, earliestPutTs=1732724594645 2024-11-27T16:23:15,996 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 568a3511e05f4e27b3418af17f810e37, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732724595365 2024-11-27T16:23:16,006 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#521 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:16,006 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/c222219510cf4e17939071f28b30e023 is 50, key is test_row_0/A:col10/1732724595378/Put/seqid=0 2024-11-27T16:23:16,007 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#522 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:16,008 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/706a93df883f47b5a046cbb36dced085 is 50, key is test_row_0/B:col10/1732724595378/Put/seqid=0 2024-11-27T16:23:16,016 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:23:16,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:16,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:16,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:16,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:16,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:16,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:16,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:16,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/a6082630179d4e27bcd44c353e3ab108 is 50, key is test_row_0/A:col10/1732724596014/Put/seqid=0 2024-11-27T16:23:16,060 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:16,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:16,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:16,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
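Editorial note on the compaction selection entries above ("Exploring compaction algorithm has selected 3 files of size 45343 ... with 1 in ratio"): a candidate set counts as "in ratio" when no single file is much larger than the combined size of the other candidates, governed by the compaction ratio (hbase.hstore.compaction.ratio, 1.2 by default). The sketch below is a simplified, self-contained model of that check, not the HBase source; the class name is illustrative and the file sizes are hypothetical values chosen to sum to the 45343 bytes reported above.

// Simplified illustration of the "in ratio" test used when picking store files
// for a minor compaction. Names and values are illustrative, not HBase source.
public final class RatioCheckSketch {

    // A candidate set is "in ratio" when no single file is larger than
    // ratio * (sum of the other files' sizes).
    static boolean filesInRatio(long[] fileSizes, double ratio) {
        long total = 0;
        for (long s : fileSizes) {
            total += s;
        }
        for (long s : fileSizes) {
            if (s > (total - s) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Three store files of roughly the sizes logged above (about 13.1 K, 14.4 K, 16.8 K),
        // summing to 45343 bytes.
        long[] candidate = {13400L, 14700L, 17243L};
        double defaultRatio = 1.2; // hbase.hstore.compaction.ratio default
        System.out.println("in ratio = " + filesInRatio(candidate, defaultRatio)); // prints true
    }
}

Because all three files pass this test, the policy accepts the whole set after considering a single permutation, which is exactly what the "1 permutations with 1 in ratio" message records.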
2024-11-27T16:23:16,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724656060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724656069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724656070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724656071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724656074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742432_1608 (size=13527) 2024-11-27T16:23:16,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742431_1607 (size=13527) 2024-11-27T16:23:16,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742433_1609 (size=17181) 2024-11-27T16:23:16,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=527 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/a6082630179d4e27bcd44c353e3ab108 2024-11-27T16:23:16,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/fc0bb60ceebd4308a2017ba38662667b is 50, key is test_row_0/B:col10/1732724596014/Put/seqid=0 2024-11-27T16:23:16,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-27T16:23:16,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724656175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724656181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724656182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724656183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724656187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742434_1610 (size=12301) 2024-11-27T16:23:16,202 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=527 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/fc0bb60ceebd4308a2017ba38662667b 2024-11-27T16:23:16,212 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:16,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:16,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:16,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:16,213 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,230 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/f7922bce7a4c4289bca4fd4d6493222c is 50, key is test_row_0/C:col10/1732724596014/Put/seqid=0 2024-11-27T16:23:16,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742435_1611 (size=12301) 2024-11-27T16:23:16,365 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:16,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:16,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:16,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:16,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
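Editorial note on the repeated pid=143 failures above: the master dispatches a FlushRegionCallable to the region server while MemStoreFlusher.0 already owns a flush of the region, so the callable logs "NOT flushing ... as already flushing", raises java.io.IOException("Unable to complete flush ..."), and the master marks the remote procedure as failed and re-dispatches it a moment later. The sketch below models that retry loop with purely illustrative types; it is not the HBase implementation.

// Illustrative model of the flush-procedure retry seen for pid=143.
// All types here are hypothetical stand-ins, not HBase classes.
import java.io.IOException;

public final class FlushProcedureSketch {

    static final class Region {
        volatile boolean flushing = true; // MemStoreFlusher.0 already owns the flush

        void flushIfIdle() throws IOException {
            if (flushing) {
                // Mirrors "NOT flushing ... as already flushing" followed by the error.
                throw new IOException("Unable to complete flush: region already flushing");
            }
            // A real flush would run here once the region is idle.
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Region region = new Region();
        for (int attempt = 1; attempt <= 3; attempt++) {
            try {
                region.flushIfIdle();
                System.out.println("attempt " + attempt + ": flush completed");
                break;
            } catch (IOException e) {
                // The master logs "Remote procedure failed, pid=..." and retries later.
                System.out.println("attempt " + attempt + ": " + e.getMessage() + ", will retry");
                Thread.sleep(100);
            }
        }
    }
}

In the log, each retry fails the same way until the in-progress flush finishes, which is why the identical stack trace appears several times for the same pid.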
2024-11-27T16:23:16,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724656386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724656387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724656387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724656395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724656397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,501 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/706a93df883f47b5a046cbb36dced085 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/706a93df883f47b5a046cbb36dced085 2024-11-27T16:23:16,505 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into 706a93df883f47b5a046cbb36dced085(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
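Editorial note on the RegionTooBusyException entries above ("Over memstore limit=512.0 K"): writes to a region are rejected once its memstore exceeds a blocking threshold, normally the flush size (hbase.hregion.memstore.flush.size) multiplied by hbase.hregion.memstore.block.multiplier; the unusually small 512 K limit here points to a deliberately tiny flush size in the test configuration. The snippet below only shows how such a threshold would be derived from assumed values; the actual settings used by TestAcidGuarantees are not visible in this log.

// Illustrative derivation of the memstore blocking threshold that triggers
// RegionTooBusyException. Values are assumptions, not the test's actual config.
public final class MemStoreLimitSketch {

    public static void main(String[] args) {
        long flushSize = 128L * 1024;    // assumed hbase.hregion.memstore.flush.size (128 K)
        long blockMultiplier = 4L;       // hbase.hregion.memstore.block.multiplier default
        long blockingLimit = flushSize * blockMultiplier; // 512 K

        long memStoreSize = 540L * 1024; // hypothetical current memstore size for the region

        if (memStoreSize > blockingLimit) {
            // Corresponds to the "Over memstore limit=512.0 K" warnings in the log:
            // the put is rejected and the client backs off and retries.
            System.out.println("Over memstore limit=" + (blockingLimit / 1024) + " K, rejecting write");
        } else {
            System.out.println("Write accepted");
        }
    }
}

Once the flushes and compactions above drain the memstore below the threshold, the retried mutations succeed and the warnings stop.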
2024-11-27T16:23:16,506 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:16,506 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=13, startTime=1732724595991; duration=0sec 2024-11-27T16:23:16,506 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:16,506 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:16,506 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:16,507 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:16,507 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/C is initiating minor compaction (all files) 2024-11-27T16:23:16,507 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/C in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:16,507 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7186d5fde2244e1b9816b82de79900ce, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/14dcbb8ddfe748dd9543842772e16fd1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a44ec08f2bd541e396deef15fc6925bc] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=34.7 K 2024-11-27T16:23:16,508 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7186d5fde2244e1b9816b82de79900ce, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1732724593865 2024-11-27T16:23:16,508 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/c222219510cf4e17939071f28b30e023 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c222219510cf4e17939071f28b30e023 2024-11-27T16:23:16,508 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 14dcbb8ddfe748dd9543842772e16fd1, keycount=100, 
bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=483, earliestPutTs=1732724594645 2024-11-27T16:23:16,508 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a44ec08f2bd541e396deef15fc6925bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732724595365 2024-11-27T16:23:16,513 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into c222219510cf4e17939071f28b30e023(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:16,513 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:16,513 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=13, startTime=1732724595991; duration=0sec 2024-11-27T16:23:16,513 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:16,513 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:16,518 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:16,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:16,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:16,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:16,519 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,528 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#C#compaction#526 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:16,528 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/ee7b1752fb6e46a6a9021f9377602447 is 50, key is test_row_0/C:col10/1732724595378/Put/seqid=0 2024-11-27T16:23:16,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742436_1612 (size=13493) 2024-11-27T16:23:16,568 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/ee7b1752fb6e46a6a9021f9377602447 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/ee7b1752fb6e46a6a9021f9377602447 2024-11-27T16:23:16,573 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/C of 00f170535dc2739662302d98f22dc172 into ee7b1752fb6e46a6a9021f9377602447(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:23:16,573 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:16,573 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/C, priority=13, startTime=1732724595994; duration=0sec 2024-11-27T16:23:16,573 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:16,573 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:16,672 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:16,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:16,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:16,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:16,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:16,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=527 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/f7922bce7a4c4289bca4fd4d6493222c 2024-11-27T16:23:16,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/a6082630179d4e27bcd44c353e3ab108 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a6082630179d4e27bcd44c353e3ab108 2024-11-27T16:23:16,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a6082630179d4e27bcd44c353e3ab108, entries=250, sequenceid=527, filesize=16.8 K 2024-11-27T16:23:16,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/fc0bb60ceebd4308a2017ba38662667b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/fc0bb60ceebd4308a2017ba38662667b 2024-11-27T16:23:16,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/fc0bb60ceebd4308a2017ba38662667b, entries=150, sequenceid=527, filesize=12.0 K 2024-11-27T16:23:16,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/f7922bce7a4c4289bca4fd4d6493222c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f7922bce7a4c4289bca4fd4d6493222c 2024-11-27T16:23:16,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f7922bce7a4c4289bca4fd4d6493222c, entries=150, sequenceid=527, filesize=12.0 K 2024-11-27T16:23:16,696 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 00f170535dc2739662302d98f22dc172 in 680ms, sequenceid=527, compaction requested=false 2024-11-27T16:23:16,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:16,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, 
dataSize=154.31 KB heapSize=405.05 KB 2024-11-27T16:23:16,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:16,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:16,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:16,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:16,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:16,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:16,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:16,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/5e56273cc7f942ee9d02bf24f20caa68 is 50, key is test_row_0/A:col10/1732724596711/Put/seqid=0 2024-11-27T16:23:16,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724656721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724656724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724656726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724656728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724656730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742437_1613 (size=14741) 2024-11-27T16:23:16,825 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:16,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:16,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:16,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:16,826 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724656832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724656837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724656837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724656837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:16,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724656840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,979 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:16,979 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:16,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:16,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:16,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:16,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:16,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:16,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:17,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724657037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724657047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724657048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724657049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724657052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,132 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:17,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:17,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:17,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:17,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
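The Mutate RPCs above are being rejected with RegionTooBusyException because the region's memstore has crossed its blocking limit (512.0 K in this run) while the flush is still in progress; in HBase that limit is normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the small figure here points to a deliberately tiny flush size in the test configuration. The following is a minimal, illustrative client-side sketch of backing off and retrying such writes, assuming an already-open Connection; the table name and column layout are taken from the log, but the retry count and backoff values are arbitrary, and the stock HBase client already performs similar retries internally.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetry {

  // Retries a single Put while the region reports it is over its memstore limit.
  static void putWithBackoff(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;                          // assumed starting backoff
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);                            // write accepted
          return;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);                   // give the flush time to drain the memstore
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
      throw new RuntimeException("region still too busy after retries");
    }
  }

  public static void main(String[] args) {
    // Row/family/qualifier mirror the test schema seen in the log (test_row_0, families A/B/C, col10).
    Put put = new Put(Bytes.toBytes("test_row_0"));
    put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    // putWithBackoff(connection, put);  // supply a Connection from ConnectionFactory.createConnection(conf)
  }
}

Exponential backoff simply gives the in-flight flush time to bring the memstore back under the blocking limit before the write is retried.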
2024-11-27T16:23:17,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:17,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:17,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=557 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/5e56273cc7f942ee9d02bf24f20caa68 2024-11-27T16:23:17,168 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/c6e940daa04146d4986b5c83f861eb00 is 50, key is test_row_0/B:col10/1732724596711/Put/seqid=0 2024-11-27T16:23:17,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742438_1614 (size=12301) 2024-11-27T16:23:17,218 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=557 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/c6e940daa04146d4986b5c83f861eb00 2024-11-27T16:23:17,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/161d1ae182084c189a52c45551d13ffe is 50, key is test_row_0/C:col10/1732724596711/Put/seqid=0 2024-11-27T16:23:17,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742439_1615 (size=12301) 2024-11-27T16:23:17,285 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,286 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:17,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:17,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:17,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
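pid=143 is a master-driven flush procedure: its FlushRegionCallable keeps being re-dispatched to the region server and keeps failing with "Unable to complete flush" because the region is already flushing, until the in-progress flush finishes (further down, at 16:23:17,693). The same kind of flush can also be requested explicitly through the Admin API; a minimal sketch, assuming an open Connection, and purely illustrative since the flushes in this log are issued by the test and the master rather than by code like this:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class FlushRequest {

  // Asks the cluster to flush every region of the table; the flush itself runs on the
  // region servers, which is what the FlushRegionCallable records above correspond to.
  static void flushTable(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}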
2024-11-27T16:23:17,287 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:17,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:17,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:17,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724657346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724657353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724657354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724657356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724657359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,440 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,441 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:17,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:17,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:17,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:17,441 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:17,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:17,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:17,593 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,594 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:17,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:17,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:17,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:17,595 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:17,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:17,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:17,667 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=557 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/161d1ae182084c189a52c45551d13ffe 2024-11-27T16:23:17,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/5e56273cc7f942ee9d02bf24f20caa68 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/5e56273cc7f942ee9d02bf24f20caa68 2024-11-27T16:23:17,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/5e56273cc7f942ee9d02bf24f20caa68, entries=200, sequenceid=557, filesize=14.4 K 2024-11-27T16:23:17,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/c6e940daa04146d4986b5c83f861eb00 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/c6e940daa04146d4986b5c83f861eb00 2024-11-27T16:23:17,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/c6e940daa04146d4986b5c83f861eb00, entries=150, 
sequenceid=557, filesize=12.0 K 2024-11-27T16:23:17,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/161d1ae182084c189a52c45551d13ffe as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/161d1ae182084c189a52c45551d13ffe 2024-11-27T16:23:17,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/161d1ae182084c189a52c45551d13ffe, entries=150, sequenceid=557, filesize=12.0 K 2024-11-27T16:23:17,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 00f170535dc2739662302d98f22dc172 in 981ms, sequenceid=557, compaction requested=true 2024-11-27T16:23:17,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:17,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:17,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:17,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:17,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:17,694 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:17,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:17,694 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:17,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:17,695 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:17,695 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction (all files) 2024-11-27T16:23:17,695 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in 
TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:17,695 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/706a93df883f47b5a046cbb36dced085, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/fc0bb60ceebd4308a2017ba38662667b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/c6e940daa04146d4986b5c83f861eb00] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=37.2 K 2024-11-27T16:23:17,695 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45449 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:17,695 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files) 2024-11-27T16:23:17,695 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:17,695 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c222219510cf4e17939071f28b30e023, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a6082630179d4e27bcd44c353e3ab108, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/5e56273cc7f942ee9d02bf24f20caa68] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=44.4 K 2024-11-27T16:23:17,696 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c222219510cf4e17939071f28b30e023, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732724595365 2024-11-27T16:23:17,696 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 706a93df883f47b5a046cbb36dced085, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732724595365 2024-11-27T16:23:17,696 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6082630179d4e27bcd44c353e3ab108, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=527, earliestPutTs=1732724595387 2024-11-27T16:23:17,697 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting fc0bb60ceebd4308a2017ba38662667b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=527, earliestPutTs=1732724595387 
2024-11-27T16:23:17,697 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e56273cc7f942ee9d02bf24f20caa68, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=557, earliestPutTs=1732724596070 2024-11-27T16:23:17,697 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting c6e940daa04146d4986b5c83f861eb00, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=557, earliestPutTs=1732724596070 2024-11-27T16:23:17,705 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#530 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:17,706 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/7d8bea0d8a2a4048bb1a90363b50bd73 is 50, key is test_row_0/A:col10/1732724596711/Put/seqid=0 2024-11-27T16:23:17,715 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#531 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:17,716 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/945bdf9c4e584f49b61d6f0da43e79e5 is 50, key is test_row_0/B:col10/1732724596711/Put/seqid=0 2024-11-27T16:23:17,747 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,748 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T16:23:17,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:17,748 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-27T16:23:17,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:17,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:17,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:17,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:17,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:17,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:17,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742440_1616 (size=13629) 2024-11-27T16:23:17,774 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/7d8bea0d8a2a4048bb1a90363b50bd73 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/7d8bea0d8a2a4048bb1a90363b50bd73 2024-11-27T16:23:17,780 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into 7d8bea0d8a2a4048bb1a90363b50bd73(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
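With the flush finished ("compaction requested=true" above), the flusher queues compactions for stores A, B and C, and ExploringCompactionPolicy selects all three eligible HFiles in each store for a minor compaction, rewriting each store's three files into a single one (13.3 K for store A here). These compactions are system-triggered; the sketch below only illustrates how a compaction can be requested and its progress polled through the Admin API, again assuming an open Connection:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;

public class CompactionStatus {

  // Queues a (minor) compaction for every store of the table and waits until the
  // table no longer reports any compaction activity.
  static void compactAndWait(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Admin admin = conn.getAdmin()) {
      admin.compact(tn);
      while (admin.getCompactionState(tn) != CompactionState.NONE) {
        Thread.sleep(500);   // poll until MINOR/MAJOR work has drained
      }
    }
  }
}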
2024-11-27T16:23:17,780 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:17,780 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=13, startTime=1732724597693; duration=0sec 2024-11-27T16:23:17,780 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:17,780 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:17,780 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:17,781 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:17,781 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/C is initiating minor compaction (all files) 2024-11-27T16:23:17,781 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/C in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:17,781 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/ee7b1752fb6e46a6a9021f9377602447, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f7922bce7a4c4289bca4fd4d6493222c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/161d1ae182084c189a52c45551d13ffe] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=37.2 K 2024-11-27T16:23:17,782 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee7b1752fb6e46a6a9021f9377602447, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732724595365 2024-11-27T16:23:17,782 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7922bce7a4c4289bca4fd4d6493222c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=527, earliestPutTs=1732724595387 2024-11-27T16:23:17,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742441_1617 (size=13629) 2024-11-27T16:23:17,783 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 161d1ae182084c189a52c45551d13ffe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=557, earliestPutTs=1732724596070 2024-11-27T16:23:17,791 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/945bdf9c4e584f49b61d6f0da43e79e5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/945bdf9c4e584f49b61d6f0da43e79e5 2024-11-27T16:23:17,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/61804fc8a03f4537845e708a904d4c89 is 50, key is test_row_0/A:col10/1732724596726/Put/seqid=0 2024-11-27T16:23:17,796 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into 945bdf9c4e584f49b61d6f0da43e79e5(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:17,796 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:17,796 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=13, startTime=1732724597693; duration=0sec 2024-11-27T16:23:17,796 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:17,796 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:17,801 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#C#compaction#533 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:17,801 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/1d86837c25774122a5ac89e65672bab7 is 50, key is test_row_0/C:col10/1732724596711/Put/seqid=0 2024-11-27T16:23:17,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742442_1618 (size=12301) 2024-11-27T16:23:17,858 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=566 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/61804fc8a03f4537845e708a904d4c89 2024-11-27T16:23:17,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742443_1619 (size=13595) 2024-11-27T16:23:17,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:17,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. as already flushing 2024-11-27T16:23:17,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/20b65366f0744631bf2c509cbc7c32fe is 50, key is test_row_0/B:col10/1732724596726/Put/seqid=0 2024-11-27T16:23:17,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742444_1620 (size=12301) 2024-11-27T16:23:17,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724657927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724657928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724657929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:17,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724657938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:17,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724657937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724658042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724658042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724658042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724658051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724658055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-27T16:23:18,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724658249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724658249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724658250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724658253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,270 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/1d86837c25774122a5ac89e65672bab7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/1d86837c25774122a5ac89e65672bab7 2024-11-27T16:23:18,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724658258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,277 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/C of 00f170535dc2739662302d98f22dc172 into 1d86837c25774122a5ac89e65672bab7(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:18,277 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:18,277 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/C, priority=13, startTime=1732724597694; duration=0sec 2024-11-27T16:23:18,277 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:18,277 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:18,322 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=566 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/20b65366f0744631bf2c509cbc7c32fe 2024-11-27T16:23:18,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/cff6fe14ca2e4aad91ea331c79c83f53 is 50, key is test_row_0/C:col10/1732724596726/Put/seqid=0 2024-11-27T16:23:18,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742445_1621 (size=12301) 2024-11-27T16:23:18,387 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=566 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/cff6fe14ca2e4aad91ea331c79c83f53 2024-11-27T16:23:18,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/61804fc8a03f4537845e708a904d4c89 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/61804fc8a03f4537845e708a904d4c89 2024-11-27T16:23:18,396 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/61804fc8a03f4537845e708a904d4c89, entries=150, sequenceid=566, filesize=12.0 K 2024-11-27T16:23:18,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/20b65366f0744631bf2c509cbc7c32fe as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/20b65366f0744631bf2c509cbc7c32fe 2024-11-27T16:23:18,402 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/20b65366f0744631bf2c509cbc7c32fe, entries=150, sequenceid=566, filesize=12.0 K 2024-11-27T16:23:18,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/cff6fe14ca2e4aad91ea331c79c83f53 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cff6fe14ca2e4aad91ea331c79c83f53 2024-11-27T16:23:18,407 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cff6fe14ca2e4aad91ea331c79c83f53, entries=150, sequenceid=566, filesize=12.0 K 2024-11-27T16:23:18,408 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 00f170535dc2739662302d98f22dc172 in 660ms, sequenceid=566, compaction requested=false 2024-11-27T16:23:18,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 
00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:18,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:18,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-27T16:23:18,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-27T16:23:18,411 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-27T16:23:18,411 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.3440 sec 2024-11-27T16:23:18,413 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 4.3480 sec 2024-11-27T16:23:18,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:18,571 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-27T16:23:18,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:18,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:18,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:18,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:18,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:18,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:18,580 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/dde001a7a4b542af97e09ba5fd8bcd2a is 50, key is test_row_0/A:col10/1732724598569/Put/seqid=0 2024-11-27T16:23:18,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724658577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724658577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724658579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724658585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724658585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742446_1622 (size=17181) 2024-11-27T16:23:18,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724658686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724658686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724658687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724658693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724658694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724658892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724658892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724658892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724658897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:18,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:18,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724658898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,013 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=598 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/dde001a7a4b542af97e09ba5fd8bcd2a 2024-11-27T16:23:19,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/8f6456ddeda74d5987f22b81c7b8d456 is 50, key is test_row_0/B:col10/1732724598569/Put/seqid=0 2024-11-27T16:23:19,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742447_1623 (size=12301) 2024-11-27T16:23:19,084 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=598 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/8f6456ddeda74d5987f22b81c7b8d456 2024-11-27T16:23:19,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/cbdc448e2e984a488a9f943abfdfa663 is 50, key is test_row_0/C:col10/1732724598569/Put/seqid=0 2024-11-27T16:23:19,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742448_1624 (size=12301) 2024-11-27T16:23:19,128 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=598 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/cbdc448e2e984a488a9f943abfdfa663 2024-11-27T16:23:19,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/dde001a7a4b542af97e09ba5fd8bcd2a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/dde001a7a4b542af97e09ba5fd8bcd2a 2024-11-27T16:23:19,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/dde001a7a4b542af97e09ba5fd8bcd2a, entries=250, sequenceid=598, filesize=16.8 K 2024-11-27T16:23:19,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/8f6456ddeda74d5987f22b81c7b8d456 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/8f6456ddeda74d5987f22b81c7b8d456 2024-11-27T16:23:19,151 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/8f6456ddeda74d5987f22b81c7b8d456, entries=150, sequenceid=598, filesize=12.0 K 2024-11-27T16:23:19,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/cbdc448e2e984a488a9f943abfdfa663 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cbdc448e2e984a488a9f943abfdfa663 2024-11-27T16:23:19,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cbdc448e2e984a488a9f943abfdfa663, entries=150, sequenceid=598, filesize=12.0 K 2024-11-27T16:23:19,157 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 00f170535dc2739662302d98f22dc172 in 586ms, sequenceid=598, compaction requested=true 2024-11-27T16:23:19,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:19,157 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:19,158 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43111 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-11-27T16:23:19,158 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files) 2024-11-27T16:23:19,158 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:19,158 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/7d8bea0d8a2a4048bb1a90363b50bd73, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/61804fc8a03f4537845e708a904d4c89, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/dde001a7a4b542af97e09ba5fd8bcd2a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=42.1 K 2024-11-27T16:23:19,159 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d8bea0d8a2a4048bb1a90363b50bd73, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=557, earliestPutTs=1732724596070 2024-11-27T16:23:19,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:19,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:19,159 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61804fc8a03f4537845e708a904d4c89, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=566, earliestPutTs=1732724596721 2024-11-27T16:23:19,160 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting dde001a7a4b542af97e09ba5fd8bcd2a, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=598, earliestPutTs=1732724597927 2024-11-27T16:23:19,160 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:19,162 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38231 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:19,162 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction (all files) 2024-11-27T16:23:19,162 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
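The "Exploring compaction algorithm has selected 3 files ..." entries above show HBase's ratio-based minor-compaction selection at work: a candidate set of store files is only accepted when no single file is much larger than the other files in the set combined. The sketch below mirrors that check in isolation, using the default ratio of 1.2 and byte sizes approximated from the A-store selection logged above; it is an illustration, not the actual ExploringCompactionPolicy source.

```java
import java.util.List;

// Simplified illustration of the ratio rule behind HBase's exploring/ratio-based
// compaction selection: a candidate set is valid only if no single file exceeds
// ratio * (combined size of the other files in the set). This is a sketch, not
// the real org.apache.hadoop.hbase.regionserver.compactions code.
public class CompactionRatioSketch {

    static boolean selectionFitsRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            // "the rest" = total minus this file
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate byte sizes of the three A-store files chosen above:
        // 7d8bea0d... ~13.3 K, 61804fc8... ~12.0 K, dde001a7... ~16.8 K
        List<Long> sizes = List.of(13_619L, 12_288L, 17_203L);
        System.out.println("valid minor-compaction set: " + selectionFitsRatio(sizes, 1.2));
    }
}
```

With these sizes the check passes, which is consistent with all three files (totalSize ~42.1 K) being compacted together in the entries above.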
2024-11-27T16:23:19,162 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/945bdf9c4e584f49b61d6f0da43e79e5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/20b65366f0744631bf2c509cbc7c32fe, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/8f6456ddeda74d5987f22b81c7b8d456] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=37.3 K 2024-11-27T16:23:19,162 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 945bdf9c4e584f49b61d6f0da43e79e5, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=557, earliestPutTs=1732724596070 2024-11-27T16:23:19,163 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 20b65366f0744631bf2c509cbc7c32fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=566, earliestPutTs=1732724596721 2024-11-27T16:23:19,163 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f6456ddeda74d5987f22b81c7b8d456, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=598, earliestPutTs=1732724597927 2024-11-27T16:23:19,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:19,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:19,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:19,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:19,172 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#539 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:19,173 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/a6513dad992444589961581e80148a78 is 50, key is test_row_0/A:col10/1732724598569/Put/seqid=0 2024-11-27T16:23:19,177 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#540 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:19,178 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/f5981941897b46f6aae3a1694afb64de is 50, key is test_row_0/B:col10/1732724598569/Put/seqid=0 2024-11-27T16:23:19,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:19,225 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:23:19,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:19,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:19,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:19,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:19,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:19,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:19,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742449_1625 (size=13731) 2024-11-27T16:23:19,235 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/a6513dad992444589961581e80148a78 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a6513dad992444589961581e80148a78 2024-11-27T16:23:19,241 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into a6513dad992444589961581e80148a78(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
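The "Committing .../.tmp/A/a6513dad... as .../A/a6513dad..." records above reflect the write-then-rename pattern used for flush and compaction output: new HFiles are materialized under the region's .tmp directory and only become visible once moved into the column-family directory. Below is a minimal sketch of that idea on top of the Hadoop FileSystem API; the paths and the helper name are illustrative placeholders, not the actual HRegionFileSystem code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;

// Schematic illustration of the ".tmp then commit" pattern visible in the log:
// a fully written file under <region>/.tmp/<family>/ is made visible by renaming
// it into <region>/<family>/. Paths and names here are placeholders.
public class TmpCommitSketch {

    // Hypothetical helper: move a completed file from .tmp into the family directory.
    static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
        Path dest = new Path(familyDir, tmpFile.getName());
        // rename is atomic within HDFS, so readers see either the whole file or nothing
        if (!fs.rename(tmpFile, dest)) {
            throw new IOException("Failed to commit " + tmpFile + " to " + dest);
        }
        return dest;
    }

    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        // Illustrative layout modelled on the region directories in the log
        Path tmp = new Path("/hbase/data/default/TestAcidGuarantees/region/.tmp/A/exampleHFile");
        Path familyDir = new Path("/hbase/data/default/TestAcidGuarantees/region/A");
        System.out.println("committed to " + commitStoreFile(fs, tmp, familyDir));
    }
}
```

This is also why both the flush output (DefaultStoreFlusher) and the compaction output above first appear under .tmp before being added to the store.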
2024-11-27T16:23:19,241 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:19,241 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=13, startTime=1732724599157; duration=0sec 2024-11-27T16:23:19,241 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:19,241 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:19,241 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:19,243 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38197 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:19,243 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/C is initiating minor compaction (all files) 2024-11-27T16:23:19,243 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/C in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:19,243 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/1d86837c25774122a5ac89e65672bab7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cff6fe14ca2e4aad91ea331c79c83f53, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cbdc448e2e984a488a9f943abfdfa663] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=37.3 K 2024-11-27T16:23:19,243 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d86837c25774122a5ac89e65672bab7, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=557, earliestPutTs=1732724596070 2024-11-27T16:23:19,244 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting cff6fe14ca2e4aad91ea331c79c83f53, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=566, earliestPutTs=1732724596721 2024-11-27T16:23:19,244 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbdc448e2e984a488a9f943abfdfa663, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=598, earliestPutTs=1732724597927 2024-11-27T16:23:19,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46329 is added to blk_1073742450_1626 (size=13731) 2024-11-27T16:23:19,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/6cecc4ab8db04ee38667131bce798d04 is 50, key is test_row_0/A:col10/1732724599213/Put/seqid=0 2024-11-27T16:23:19,253 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/f5981941897b46f6aae3a1694afb64de as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f5981941897b46f6aae3a1694afb64de 2024-11-27T16:23:19,259 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into f5981941897b46f6aae3a1694afb64de(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:19,259 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:19,259 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=13, startTime=1732724599160; duration=0sec 2024-11-27T16:23:19,260 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:19,260 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:19,270 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#C#compaction#542 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:19,270 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/72ff5ca082474f7ba52cd96a33d75f08 is 50, key is test_row_0/C:col10/1732724598569/Put/seqid=0 2024-11-27T16:23:19,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724659283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724659284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742451_1627 (size=12301) 2024-11-27T16:23:19,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=610 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/6cecc4ab8db04ee38667131bce798d04 2024-11-27T16:23:19,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724659286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724659287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724659288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742452_1628 (size=13697) 2024-11-27T16:23:19,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/80a23444ffaa497293ff7f03f682eef7 is 50, key is test_row_0/B:col10/1732724599213/Put/seqid=0 2024-11-27T16:23:19,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742453_1629 (size=12301) 2024-11-27T16:23:19,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724659390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724659391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,407 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724659399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,408 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724659399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,408 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724659399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724659597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724659597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724659609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724659609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724659610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,714 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/72ff5ca082474f7ba52cd96a33d75f08 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/72ff5ca082474f7ba52cd96a33d75f08 2024-11-27T16:23:19,720 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/C of 00f170535dc2739662302d98f22dc172 into 72ff5ca082474f7ba52cd96a33d75f08(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:23:19,720 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172:
2024-11-27T16:23:19,720 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/C, priority=13, startTime=1732724599170; duration=0sec
2024-11-27T16:23:19,720 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T16:23:19,720 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C
2024-11-27T16:23:19,763 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=610 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/80a23444ffaa497293ff7f03f682eef7
2024-11-27T16:23:19,786 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/bec4f1a459554817823b24840daa24c6 is 50, key is test_row_0/C:col10/1732724599213/Put/seqid=0
2024-11-27T16:23:19,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742454_1630 (size=12301)
2024-11-27T16:23:19,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=610 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/bec4f1a459554817823b24840daa24c6
2024-11-27T16:23:19,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/6cecc4ab8db04ee38667131bce798d04 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6cecc4ab8db04ee38667131bce798d04
2024-11-27T16:23:19,852 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6cecc4ab8db04ee38667131bce798d04, entries=150, sequenceid=610, filesize=12.0 K
2024-11-27T16:23:19,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/80a23444ffaa497293ff7f03f682eef7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/80a23444ffaa497293ff7f03f682eef7
2024-11-27T16:23:19,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/80a23444ffaa497293ff7f03f682eef7, entries=150, sequenceid=610, filesize=12.0 K 2024-11-27T16:23:19,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/bec4f1a459554817823b24840daa24c6 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bec4f1a459554817823b24840daa24c6 2024-11-27T16:23:19,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bec4f1a459554817823b24840daa24c6, entries=150, sequenceid=610, filesize=12.0 K 2024-11-27T16:23:19,864 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 00f170535dc2739662302d98f22dc172 in 639ms, sequenceid=610, compaction requested=false 2024-11-27T16:23:19,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:19,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:23:19,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:19,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:19,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:19,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:19,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:19,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:19,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:19,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/e0a60f49d79c442c8eca80795d778c1f is 50, key is test_row_0/A:col10/1732724599283/Put/seqid=0 2024-11-27T16:23:19,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724659933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724659936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724659934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742455_1631 (size=14741) 2024-11-27T16:23:19,947 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=638 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/e0a60f49d79c442c8eca80795d778c1f 2024-11-27T16:23:19,956 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/7f28fefc293b4b24b7edc7c7ccbaa9ad is 50, key is test_row_0/B:col10/1732724599283/Put/seqid=0 2024-11-27T16:23:19,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724659945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:19,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724659945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:19,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742456_1632 (size=12301) 2024-11-27T16:23:19,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=638 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/7f28fefc293b4b24b7edc7c7ccbaa9ad 2024-11-27T16:23:20,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/f41a89400ed345ceb89edeb17d0fd77a is 50, key is test_row_0/C:col10/1732724599283/Put/seqid=0 2024-11-27T16:23:20,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724660046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724660047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724660047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724660059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724660060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742457_1633 (size=12301) 2024-11-27T16:23:20,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724660255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724660257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724660257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724660269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724660270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,470 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=638 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/f41a89400ed345ceb89edeb17d0fd77a 2024-11-27T16:23:20,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/e0a60f49d79c442c8eca80795d778c1f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e0a60f49d79c442c8eca80795d778c1f 2024-11-27T16:23:20,482 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e0a60f49d79c442c8eca80795d778c1f, entries=200, sequenceid=638, filesize=14.4 K 2024-11-27T16:23:20,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/7f28fefc293b4b24b7edc7c7ccbaa9ad as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/7f28fefc293b4b24b7edc7c7ccbaa9ad 2024-11-27T16:23:20,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/7f28fefc293b4b24b7edc7c7ccbaa9ad, entries=150, sequenceid=638, filesize=12.0 K
2024-11-27T16:23:20,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/f41a89400ed345ceb89edeb17d0fd77a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f41a89400ed345ceb89edeb17d0fd77a
2024-11-27T16:23:20,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f41a89400ed345ceb89edeb17d0fd77a, entries=150, sequenceid=638, filesize=12.0 K
2024-11-27T16:23:20,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 00f170535dc2739662302d98f22dc172 in 597ms, sequenceid=638, compaction requested=true
2024-11-27T16:23:20,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172:
2024-11-27T16:23:20,515 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T16:23:20,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 1
2024-11-27T16:23:20,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T16:23:20,516 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T16:23:20,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 2
2024-11-27T16:23:20,516 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40773 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T16:23:20,516 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files)
2024-11-27T16:23:20,516 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.
2024-11-27T16:23:20,517 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a6513dad992444589961581e80148a78, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6cecc4ab8db04ee38667131bce798d04, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e0a60f49d79c442c8eca80795d778c1f] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=39.8 K
2024-11-27T16:23:20,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T16:23:20,517 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6513dad992444589961581e80148a78, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=598, earliestPutTs=1732724597927
2024-11-27T16:23:20,517 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38333 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T16:23:20,517 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction (all files)
2024-11-27T16:23:20,517 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.
2024-11-27T16:23:20,517 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f5981941897b46f6aae3a1694afb64de, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/80a23444ffaa497293ff7f03f682eef7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/7f28fefc293b4b24b7edc7c7ccbaa9ad] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=37.4 K
2024-11-27T16:23:20,517 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6cecc4ab8db04ee38667131bce798d04, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=610, earliestPutTs=1732724598583
2024-11-27T16:23:20,517 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f5981941897b46f6aae3a1694afb64de, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=598, earliestPutTs=1732724597927
2024-11-27T16:23:20,517 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0a60f49d79c442c8eca80795d778c1f, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=638, earliestPutTs=1732724599282
2024-11-27T16:23:20,518 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 80a23444ffaa497293ff7f03f682eef7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=610, earliestPutTs=1732724598583
2024-11-27T16:23:20,518 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f28fefc293b4b24b7edc7c7ccbaa9ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=638, earliestPutTs=1732724599282
2024-11-27T16:23:20,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3
2024-11-27T16:23:20,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-27T16:23:20,527 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#548 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T16:23:20,528 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/55ca4f84eb934bc09989cb0b1451aa35 is 50, key is test_row_0/B:col10/1732724599283/Put/seqid=0
2024-11-27T16:23:20,535 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#549 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T16:23:20,536 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/0c5bb02c99ea417fba5f4e8835558513 is 50, key is test_row_0/A:col10/1732724599283/Put/seqid=0
2024-11-27T16:23:20,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742458_1634 (size=13833)
2024-11-27T16:23:20,575 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-11-27T16:23:20,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A
2024-11-27T16:23:20,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:23:20,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B
2024-11-27T16:23:20,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:23:20,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C
2024-11-27T16:23:20,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:23:20,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172
2024-11-27T16:23:20,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742459_1635 (size=13833)
2024-11-27T16:23:20,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/54397932bea844f4a9b3ed6b27e6c4ab is 50, key is test_row_0/A:col10/1732724600573/Put/seqid=0
2024-11-27T16:23:20,595 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/0c5bb02c99ea417fba5f4e8835558513 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/0c5bb02c99ea417fba5f4e8835558513
2024-11-27T16:23:20,600 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into 0c5bb02c99ea417fba5f4e8835558513(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T16:23:20,600 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172:
2024-11-27T16:23:20,600 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=13, startTime=1732724600515; duration=0sec
2024-11-27T16:23:20,600 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-27T16:23:20,600 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A
2024-11-27T16:23:20,600 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T16:23:20,601 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38299 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T16:23:20,601 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/C is initiating minor compaction (all files)
2024-11-27T16:23:20,601 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/C in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.
2024-11-27T16:23:20,601 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/72ff5ca082474f7ba52cd96a33d75f08, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bec4f1a459554817823b24840daa24c6, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f41a89400ed345ceb89edeb17d0fd77a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=37.4 K
2024-11-27T16:23:20,602 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72ff5ca082474f7ba52cd96a33d75f08, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=598, earliestPutTs=1732724597927
2024-11-27T16:23:20,602 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting bec4f1a459554817823b24840daa24c6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=610, earliestPutTs=1732724598583
2024-11-27T16:23:20,603 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f41a89400ed345ceb89edeb17d0fd77a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=638, earliestPutTs=1732724599282
2024-11-27T16:23:20,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742460_1636 (size=12301)
2024-11-27T16:23:20,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=651 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/54397932bea844f4a9b3ed6b27e6c4ab
2024-11-27T16:23:20,632 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#C#compaction#551 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T16:23:20,633 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/e2f7f49962114a34b9a936431c208744 is 50, key is test_row_0/C:col10/1732724599283/Put/seqid=0
2024-11-27T16:23:20,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/837d1242784c4fb19ae9e189569e33ca is 50, key is test_row_0/B:col10/1732724600573/Put/seqid=0
2024-11-27T16:23:20,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724660627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724660628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724660628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724660640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724660641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742461_1637 (size=13799) 2024-11-27T16:23:20,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742462_1638 (size=12301) 2024-11-27T16:23:20,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724660743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724660743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724660743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724660753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724660754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724660953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724660955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724660955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724660967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,977 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/55ca4f84eb934bc09989cb0b1451aa35 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/55ca4f84eb934bc09989cb0b1451aa35 2024-11-27T16:23:20,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:20,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724660968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:20,983 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into 55ca4f84eb934bc09989cb0b1451aa35(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:23:20,983 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:20,983 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=13, startTime=1732724600516; duration=0sec 2024-11-27T16:23:20,983 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:20,983 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:21,084 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/e2f7f49962114a34b9a936431c208744 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/e2f7f49962114a34b9a936431c208744 2024-11-27T16:23:21,090 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/C of 00f170535dc2739662302d98f22dc172 into e2f7f49962114a34b9a936431c208744(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:21,090 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:21,090 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/C, priority=13, startTime=1732724600517; duration=0sec 2024-11-27T16:23:21,090 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:21,090 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:21,104 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=651 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/837d1242784c4fb19ae9e189569e33ca 2024-11-27T16:23:21,122 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/7b06a1b9108e417fb62cbb7071334a80 is 50, key is test_row_0/C:col10/1732724600573/Put/seqid=0 2024-11-27T16:23:21,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742463_1639 (size=12301) 
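At this point all three stores (A, B and C) of region 00f170535dc2739662302d98f22dc172 have been minor-compacted down to a single file each by the server's own compaction policy. Purely as an illustration, not code taken from the test, a compaction can also be requested and observed from the outside through the standard Admin API; the table name below is copied from the log and everything else is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionProbe {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();           // reads hbase-site.xml from the classpath
            TableName table = TableName.valueOf("TestAcidGuarantees");  // table name as it appears in the log
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                admin.compact(table);                                   // ask the region servers to compact this table
                // Poll until the servers report no compaction in progress for the table.
                while (admin.getCompactionState(table) != CompactionState.NONE) {
                    Thread.sleep(1000);
                }
            }
        }
    }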
2024-11-27T16:23:21,148 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=651 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/7b06a1b9108e417fb62cbb7071334a80 2024-11-27T16:23:21,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/54397932bea844f4a9b3ed6b27e6c4ab as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/54397932bea844f4a9b3ed6b27e6c4ab 2024-11-27T16:23:21,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/54397932bea844f4a9b3ed6b27e6c4ab, entries=150, sequenceid=651, filesize=12.0 K 2024-11-27T16:23:21,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/837d1242784c4fb19ae9e189569e33ca as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/837d1242784c4fb19ae9e189569e33ca 2024-11-27T16:23:21,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/837d1242784c4fb19ae9e189569e33ca, entries=150, sequenceid=651, filesize=12.0 K 2024-11-27T16:23:21,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/7b06a1b9108e417fb62cbb7071334a80 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7b06a1b9108e417fb62cbb7071334a80 2024-11-27T16:23:21,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7b06a1b9108e417fb62cbb7071334a80, entries=150, sequenceid=651, filesize=12.0 K 2024-11-27T16:23:21,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 00f170535dc2739662302d98f22dc172 in 609ms, sequenceid=651, compaction requested=false 2024-11-27T16:23:21,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:21,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T16:23:21,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 
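The surrounding WARN/DEBUG pairs are the server rejecting Mutate calls while the memstore drains; the HBase client normally retries these internally according to its retry settings, and depending on those settings the exception may surface directly or wrapped once retries are exhausted. As a hedged sketch of an explicit caller-side backoff (row, family and qualifier are copied from the log; the value, retry count and sleep times are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 0; attempt < 10; attempt++) {
                    try {
                        table.put(put);           // may be rejected while the memstore is over its blocking size
                        break;                    // write accepted
                    } catch (RegionTooBusyException e) {
                        Thread.sleep(backoffMs);  // give flushes and compactions time to drain the memstore
                        backoffMs = Math.min(backoffMs * 2, 5_000);
                    }
                }
            }
        }
    }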
2024-11-27T16:23:21,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:21,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:21,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:21,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:21,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:21,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:21,292 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-27T16:23:21,297 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/f76c4d10e63e4cf7949e469e560104e2 is 50, key is test_row_0/A:col10/1732724601277/Put/seqid=0 2024-11-27T16:23:21,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724661297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742464_1640 (size=14741) 2024-11-27T16:23:21,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=680 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/f76c4d10e63e4cf7949e469e560104e2 2024-11-27T16:23:21,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/5aa6157fee434248a63be95d46e13d9a is 50, key is test_row_0/B:col10/1732724601277/Put/seqid=0 2024-11-27T16:23:21,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724661323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724661324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724661328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724661333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742465_1641 (size=12301) 2024-11-27T16:23:21,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=680 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/5aa6157fee434248a63be95d46e13d9a 2024-11-27T16:23:21,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/9fd47d61c04c4dc5a651fd28b82f2efc is 50, key is test_row_0/C:col10/1732724601277/Put/seqid=0 2024-11-27T16:23:21,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724661434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742466_1642 (size=12301) 2024-11-27T16:23:21,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724661451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724661451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724661451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724661451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,569 DEBUG [Thread-2225 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x56a4483a to 127.0.0.1:51088 2024-11-27T16:23:21,569 DEBUG [Thread-2225 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:21,571 DEBUG [Thread-2217 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42e690d6 to 127.0.0.1:51088 2024-11-27T16:23:21,571 DEBUG [Thread-2217 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:21,572 DEBUG [Thread-2221 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x00df2701 to 127.0.0.1:51088 2024-11-27T16:23:21,572 DEBUG [Thread-2221 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:21,576 DEBUG [Thread-2223 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x35ca71a1 to 127.0.0.1:51088 2024-11-27T16:23:21,577 DEBUG [Thread-2223 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:21,577 DEBUG [Thread-2219 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3abeec20 to 127.0.0.1:51088 2024-11-27T16:23:21,577 DEBUG [Thread-2219 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:21,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38740 deadline: 1732724661643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38754 deadline: 1732724661658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38758 deadline: 1732724661658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 288 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38730 deadline: 1732724661658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:21,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38716 deadline: 1732724661659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:21,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=680 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/9fd47d61c04c4dc5a651fd28b82f2efc 2024-11-27T16:23:21,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/f76c4d10e63e4cf7949e469e560104e2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f76c4d10e63e4cf7949e469e560104e2 2024-11-27T16:23:21,853 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f76c4d10e63e4cf7949e469e560104e2, entries=200, sequenceid=680, filesize=14.4 K 2024-11-27T16:23:21,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/5aa6157fee434248a63be95d46e13d9a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/5aa6157fee434248a63be95d46e13d9a 2024-11-27T16:23:21,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/5aa6157fee434248a63be95d46e13d9a, entries=150, sequenceid=680, filesize=12.0 K 2024-11-27T16:23:21,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/9fd47d61c04c4dc5a651fd28b82f2efc as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/9fd47d61c04c4dc5a651fd28b82f2efc 2024-11-27T16:23:21,861 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/9fd47d61c04c4dc5a651fd28b82f2efc, entries=150, sequenceid=680, filesize=12.0 K 2024-11-27T16:23:21,862 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 00f170535dc2739662302d98f22dc172 in 585ms, sequenceid=680, compaction requested=true 2024-11-27T16:23:21,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:21,862 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:21,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:21,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:21,863 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:21,863 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40875 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:21,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:21,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:21,863 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/A is initiating minor compaction (all files) 2024-11-27T16:23:21,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:21,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:21,863 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/A in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
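The burst of RegionTooBusyException warnings earlier in this run comes from HRegion.checkResources rejecting mutations while the region's memstore is over its blocking limit (512.0 K here); the flush that finishes at 16:23:21,862 is what relieves that pressure. As a purely illustrative sketch, not part of this test run, a writer hitting that exception could back off and retry the put. The class name, retry count, backoff values, and cell value below are invented for the example; the table, row, family, and qualifier are taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);          // may be rejected while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException busy) {
                    if (attempt >= 5) {
                        throw busy;          // give up after a few attempts
                    }
                    Thread.sleep(backoffMs); // wait for a flush to drain the memstore, then retry
                    backoffMs *= 2;
                }
            }
        }
    }
}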
2024-11-27T16:23:21,864 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/0c5bb02c99ea417fba5f4e8835558513, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/54397932bea844f4a9b3ed6b27e6c4ab, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f76c4d10e63e4cf7949e469e560104e2] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=39.9 K 2024-11-27T16:23:21,864 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c5bb02c99ea417fba5f4e8835558513, keycount=150, bloomtype=ROW, size=13.5 K, encoding=NONE, compression=NONE, seqNum=638, earliestPutTs=1732724599282 2024-11-27T16:23:21,864 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 54397932bea844f4a9b3ed6b27e6c4ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=651, earliestPutTs=1732724599930 2024-11-27T16:23:21,864 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38435 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:21,864 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/B is initiating minor compaction (all files) 2024-11-27T16:23:21,864 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/B in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:21,864 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/55ca4f84eb934bc09989cb0b1451aa35, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/837d1242784c4fb19ae9e189569e33ca, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/5aa6157fee434248a63be95d46e13d9a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=37.5 K 2024-11-27T16:23:21,865 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f76c4d10e63e4cf7949e469e560104e2, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=680, earliestPutTs=1732724600637 2024-11-27T16:23:21,865 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55ca4f84eb934bc09989cb0b1451aa35, keycount=150, bloomtype=ROW, size=13.5 K, encoding=NONE, compression=NONE, seqNum=638, earliestPutTs=1732724599282 2024-11-27T16:23:21,865 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 837d1242784c4fb19ae9e189569e33ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=651, earliestPutTs=1732724599930 2024-11-27T16:23:21,866 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5aa6157fee434248a63be95d46e13d9a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=680, earliestPutTs=1732724600637 2024-11-27T16:23:21,887 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#A#compaction#557 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:21,887 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/53c1fafcefc94091b09c88ce0de6bad0 is 50, key is test_row_0/A:col10/1732724601277/Put/seqid=0 2024-11-27T16:23:21,892 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#B#compaction#558 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:21,892 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/cf71af47a8ed4c85b1f4cc60d89b40b2 is 50, key is test_row_0/B:col10/1732724601277/Put/seqid=0 2024-11-27T16:23:21,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742467_1643 (size=13935) 2024-11-27T16:23:21,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742468_1644 (size=13935) 2024-11-27T16:23:21,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:21,946 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:23:21,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:21,946 DEBUG [Thread-2206 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46c37647 to 127.0.0.1:51088 2024-11-27T16:23:21,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:21,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:21,947 DEBUG [Thread-2206 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:21,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:21,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:21,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:21,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/c4f5571043854c39af2b3f7fb36ecebd is 50, key is test_row_0/A:col10/1732724601323/Put/seqid=0 2024-11-27T16:23:21,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742469_1645 (size=12301) 2024-11-27T16:23:21,955 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=691 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/c4f5571043854c39af2b3f7fb36ecebd 2024-11-27T16:23:21,961 DEBUG [Thread-2212 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x632d1806 to 127.0.0.1:51088 2024-11-27T16:23:21,961 DEBUG [Thread-2212 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:21,962 DEBUG [Thread-2210 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51453050 to 127.0.0.1:51088 2024-11-27T16:23:21,962 DEBUG 
[Thread-2210 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:21,965 DEBUG [Thread-2214 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4f99adfe to 127.0.0.1:51088 2024-11-27T16:23:21,965 DEBUG [Thread-2214 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:21,966 DEBUG [Thread-2208 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2fb24d40 to 127.0.0.1:51088 2024-11-27T16:23:21,967 DEBUG [Thread-2208 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:21,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/661cb39e5f3845b492b6f204c8d6b732 is 50, key is test_row_0/B:col10/1732724601323/Put/seqid=0 2024-11-27T16:23:21,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742470_1646 (size=12301) 2024-11-27T16:23:21,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=691 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/661cb39e5f3845b492b6f204c8d6b732 2024-11-27T16:23:21,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/097ce23af4c74fe0b4cb7ab13f7330f8 is 50, key is test_row_0/C:col10/1732724601323/Put/seqid=0 2024-11-27T16:23:21,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742471_1647 (size=12301) 2024-11-27T16:23:21,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=691 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/097ce23af4c74fe0b4cb7ab13f7330f8 2024-11-27T16:23:22,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/c4f5571043854c39af2b3f7fb36ecebd as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c4f5571043854c39af2b3f7fb36ecebd 2024-11-27T16:23:22,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c4f5571043854c39af2b3f7fb36ecebd, entries=150, sequenceid=691, filesize=12.0 K 2024-11-27T16:23:22,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/661cb39e5f3845b492b6f204c8d6b732 as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/661cb39e5f3845b492b6f204c8d6b732 2024-11-27T16:23:22,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/661cb39e5f3845b492b6f204c8d6b732, entries=150, sequenceid=691, filesize=12.0 K 2024-11-27T16:23:22,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/097ce23af4c74fe0b4cb7ab13f7330f8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/097ce23af4c74fe0b4cb7ab13f7330f8 2024-11-27T16:23:22,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/097ce23af4c74fe0b4cb7ab13f7330f8, entries=150, sequenceid=691, filesize=12.0 K 2024-11-27T16:23:22,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=26.84 KB/27480 for 00f170535dc2739662302d98f22dc172 in 67ms, sequenceid=691, compaction requested=true 2024-11-27T16:23:22,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:22,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:A, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:22,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:23:22,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:B, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:22,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-27T16:23:22,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 00f170535dc2739662302d98f22dc172:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:22,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-27T16:23:22,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-27T16:23:22,172 INFO [Thread-2216 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-27T16:23:22,172 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 111 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 108 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 107 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 109 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 113 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1513 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4539 rows 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1524 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4572 rows 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1514 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4542 rows 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1513 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4539 rows 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1507 2024-11-27T16:23:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4521 rows 2024-11-27T16:23:22,173 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-27T16:23:22,173 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4fbee617 to 127.0.0.1:51088 2024-11-27T16:23:22,173 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:22,176 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-27T16:23:22,176 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-27T16:23:22,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:22,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-27T16:23:22,183 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724602183"}]},"ts":"1732724602183"} 2024-11-27T16:23:22,184 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-27T16:23:22,188 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-27T16:23:22,188 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T16:23:22,189 INFO [PEWorker-5 
{}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=00f170535dc2739662302d98f22dc172, UNASSIGN}] 2024-11-27T16:23:22,190 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=00f170535dc2739662302d98f22dc172, UNASSIGN 2024-11-27T16:23:22,191 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=00f170535dc2739662302d98f22dc172, regionState=CLOSING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:23:22,191 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T16:23:22,192 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; CloseRegionProcedure 00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:23:22,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-27T16:23:22,317 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/53c1fafcefc94091b09c88ce0de6bad0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/53c1fafcefc94091b09c88ce0de6bad0 2024-11-27T16:23:22,323 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/cf71af47a8ed4c85b1f4cc60d89b40b2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/cf71af47a8ed4c85b1f4cc60d89b40b2 2024-11-27T16:23:22,323 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/A of 00f170535dc2739662302d98f22dc172 into 53c1fafcefc94091b09c88ce0de6bad0(size=13.6 K), total size for store is 25.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
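The entry above reports the A-store compaction finishing: three input files rewritten into 53c1fafcefc94091b09c88ce0de6bad0 (13.6 K), leaving 25.6 K in the store. For inspecting the resulting store layout directly, a small hypothetical HDFS listing of the family directory would look roughly like the sketch below; the NameNode URI and store path are copied from the log, while the FamilyLister class itself is made up for illustration.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FamilyLister {
    public static void main(String[] args) throws Exception {
        // NameNode and store path as they appear in the log; adjust for a real cluster.
        URI nameNode = URI.create("hdfs://localhost:34065");
        Path storeDir = new Path("/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/"
            + "data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A");

        try (FileSystem fs = FileSystem.get(nameNode, new Configuration())) {
            for (FileStatus hfile : fs.listStatus(storeDir)) {
                // After the compaction above, the A family should contain the new 13.6 K HFile
                // (53c1fafcefc94091b09c88ce0de6bad0) plus any files flushed since selection.
                System.out.printf("%s\t%d bytes%n", hfile.getPath().getName(), hfile.getLen());
            }
        }
    }
}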
2024-11-27T16:23:22,323 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:22,323 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/A, priority=13, startTime=1732724601862; duration=0sec 2024-11-27T16:23:22,323 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-27T16:23:22,323 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:22,323 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:22,323 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:22,326 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50702 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:22,326 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 00f170535dc2739662302d98f22dc172/C is initiating minor compaction (all files) 2024-11-27T16:23:22,326 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 00f170535dc2739662302d98f22dc172/C in TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
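The selection message above ("Exploring compaction algorithm has selected 4 files of size 50702 starting at candidate #0 after considering 3 permutations with 3 in ratio") comes from the exploring compaction policy weighing candidate file sets. The sketch below is a deliberately simplified stand-in for the ratio test those messages refer to, namely that a candidate set is acceptable only if no file in it is larger than the combined size of the other candidates times the configured ratio; it is not the actual ExploringCompactionPolicy code, and the class name, file sizes, and 1.2 ratio are illustrative values.

import java.util.List;

public class RatioCheckSketch {
    /**
     * Simplified ratio test: a candidate set passes only if every file in it is
     * no larger than (sum of the other files) * ratio. The real policy also
     * enforces minimum and maximum file counts and explores many candidate windows.
     */
    static boolean withinRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // File sizes roughly matching the C-store selection in the surrounding entries
        // (13.5 K, 12.0 K, 12.0 K, 12.0 K), checked against a commonly used ratio of 1.2.
        List<Long> candidates = List.of(13_824L, 12_288L, 12_288L, 12_288L);
        System.out.println("compact together? " + withinRatio(candidates, 1.2));
    }
}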
2024-11-27T16:23:22,326 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/e2f7f49962114a34b9a936431c208744, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7b06a1b9108e417fb62cbb7071334a80, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/9fd47d61c04c4dc5a651fd28b82f2efc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/097ce23af4c74fe0b4cb7ab13f7330f8] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp, totalSize=49.5 K 2024-11-27T16:23:22,327 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting e2f7f49962114a34b9a936431c208744, keycount=150, bloomtype=ROW, size=13.5 K, encoding=NONE, compression=NONE, seqNum=638, earliestPutTs=1732724599282 2024-11-27T16:23:22,327 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b06a1b9108e417fb62cbb7071334a80, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=651, earliestPutTs=1732724599930 2024-11-27T16:23:22,328 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 9fd47d61c04c4dc5a651fd28b82f2efc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=680, earliestPutTs=1732724600637 2024-11-27T16:23:22,328 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 00f170535dc2739662302d98f22dc172/B of 00f170535dc2739662302d98f22dc172 into cf71af47a8ed4c85b1f4cc60d89b40b2(size=13.6 K), total size for store is 25.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:23:22,328 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:22,328 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/B, priority=13, startTime=1732724601863; duration=0sec 2024-11-27T16:23:22,328 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-27T16:23:22,328 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:22,328 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:22,329 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-11-27T16:23:22,329 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-27T16:23:22,329 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-27T16:23:22,329 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. because compaction request was cancelled 2024-11-27T16:23:22,329 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:22,329 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-27T16:23:22,329 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 097ce23af4c74fe0b4cb7ab13f7330f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=691, earliestPutTs=1732724601295 2024-11-27T16:23:22,329 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-27T16:23:22,329 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-27T16:23:22,329 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
because compaction request was cancelled 2024-11-27T16:23:22,330 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:B 2024-11-27T16:23:22,330 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-27T16:23:22,330 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-27T16:23:22,330 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-27T16:23:22,330 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. because compaction request was cancelled 2024-11-27T16:23:22,330 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:A 2024-11-27T16:23:22,343 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:22,343 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(124): Close 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:22,343 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T16:23:22,344 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1681): Closing 00f170535dc2739662302d98f22dc172, disabling compactions & flushes 2024-11-27T16:23:22,344 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 00f170535dc2739662302d98f22dc172#C#compaction#562 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:22,344 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1942): waiting for 1 compactions to complete for region TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
2024-11-27T16:23:22,345 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/d1881ac599684ef1813767c0209f0509 is 50, key is test_row_0/C:col10/1732724601323/Put/seqid=0 2024-11-27T16:23:22,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742472_1648 (size=13935) 2024-11-27T16:23:22,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-27T16:23:22,756 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/d1881ac599684ef1813767c0209f0509 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/d1881ac599684ef1813767c0209f0509 2024-11-27T16:23:22,760 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 00f170535dc2739662302d98f22dc172/C of 00f170535dc2739662302d98f22dc172 into d1881ac599684ef1813767c0209f0509(size=13.6 K), total size for store is 13.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:22,760 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:22,760 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172., storeName=00f170535dc2739662302d98f22dc172/C, priority=12, startTime=1732724602013; duration=0sec 2024-11-27T16:23:22,760 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:22,760 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:22,760 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:22,760 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 00f170535dc2739662302d98f22dc172:C 2024-11-27T16:23:22,760 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. after waiting 0 ms 2024-11-27T16:23:22,760 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 
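The close sequence above (close lock acquired, updates disabled, final flush below) is driven by the DisableTableProcedure stored earlier as pid=144 after "Client=jenkins//172.17.0.2 disable TestAcidGuarantees". As a hedged illustration of the client side of that exchange, issued in this run through HBaseAdmin, the call looks roughly like the sketch below; the repeated "Checking to see if procedure is done pid=144" lines are the master answering the client's polls for completion. The class name and the enabled-state check are invented for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            if (admin.isTableEnabled(table)) {
                // Submits a DisableTableProcedure on the master and waits for it to complete,
                // which is what drives the region close and final flush seen in the log.
                admin.disableTable(table);
            }
            // A throwaway test table would typically be deleted afterwards:
            // admin.deleteTable(table);
        }
    }
}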
2024-11-27T16:23:22,760 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(2837): Flushing 00f170535dc2739662302d98f22dc172 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-27T16:23:22,761 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=A 2024-11-27T16:23:22,761 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:22,761 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=B 2024-11-27T16:23:22,761 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:22,761 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 00f170535dc2739662302d98f22dc172, store=C 2024-11-27T16:23:22,761 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:22,764 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/041fe1207c1b4b2cb74d3d079d82c73e is 50, key is test_row_0/A:col10/1732724601962/Put/seqid=0 2024-11-27T16:23:22,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742473_1649 (size=9857) 2024-11-27T16:23:22,772 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=701 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/041fe1207c1b4b2cb74d3d079d82c73e 2024-11-27T16:23:22,781 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/839b0f3fb2824177ba01c4ab774d2d19 is 50, key is test_row_0/B:col10/1732724601962/Put/seqid=0 2024-11-27T16:23:22,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-27T16:23:22,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742474_1650 (size=9857) 2024-11-27T16:23:23,191 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=701 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/839b0f3fb2824177ba01c4ab774d2d19 2024-11-27T16:23:23,198 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/f7084a6a6d2b4e86b1d933200e45731c is 50, key is test_row_0/C:col10/1732724601962/Put/seqid=0 2024-11-27T16:23:23,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742475_1651 (size=9857) 2024-11-27T16:23:23,202 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=701 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/f7084a6a6d2b4e86b1d933200e45731c 2024-11-27T16:23:23,205 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/A/041fe1207c1b4b2cb74d3d079d82c73e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/041fe1207c1b4b2cb74d3d079d82c73e 2024-11-27T16:23:23,209 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/041fe1207c1b4b2cb74d3d079d82c73e, entries=100, sequenceid=701, filesize=9.6 K 2024-11-27T16:23:23,209 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/B/839b0f3fb2824177ba01c4ab774d2d19 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/839b0f3fb2824177ba01c4ab774d2d19 2024-11-27T16:23:23,213 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/839b0f3fb2824177ba01c4ab774d2d19, entries=100, sequenceid=701, filesize=9.6 K 2024-11-27T16:23:23,214 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/.tmp/C/f7084a6a6d2b4e86b1d933200e45731c as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f7084a6a6d2b4e86b1d933200e45731c 2024-11-27T16:23:23,220 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f7084a6a6d2b4e86b1d933200e45731c, entries=100, sequenceid=701, filesize=9.6 K 2024-11-27T16:23:23,220 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 00f170535dc2739662302d98f22dc172 in 460ms, sequenceid=701, compaction requested=true 2024-11-27T16:23:23,221 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e8a26740741f4b849f0fe7496b05942f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d6c00ceab2d04782bf4824d90bbcf4f3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6a935771f09f4745af5b04dad37d5037, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c554ac2f54624062a5216cdb33789176, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/1ef4213d8d6b4d3fb57a08cd37e46dd8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/5b25aac186af46c48d8dbce3fa2f4ab8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/202a5835ff2347a094a39485e09d0d05, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/abd2ccbf670c47f89a02dea2886333a8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/8ceff44c27d64b97b7c6930348021134, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/027b19a0d4854dd49aa8c5c779731eba, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/173e5aa140f34d1b96fa583fb7d873f7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d04b564a13ca4e579fe1a63d84755b95, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/bf69d9ce36f4465596589255c77d53d0, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a9eb5eb61caa4d80b3fd908872a17c8e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/b10e0d8cedc54ea3897de9a22d060719, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f05cd7f7472549b0b9cf09365ce5125a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/01c66ad47c024f4cab1cf14f7b8dee47, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/ed34516e66b6428bbd1d8e513f925009, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/faffc142ce0940168c65558d0c4a1e73, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/33939db86a7c4d59a6de3f1599b59ca8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a50eeb12c4ce4f9598ccdf3d679eb990, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2091a4c80c6b4793acb1641a1364d922, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2f4b913bddc74ea39c7b610cd249b1c5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/fc8d06d6fd24444ea2dbcd806a315618, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/8e7aab86d4b048adb6adf190956faadf, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/32ec75c3e0974858abf81a3a714c1548, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2f6313c4596c4b4a987e88f797179aa5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f21b191690054776ba08244a3f0429b3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6a34adcb00a6486a92bc6b36938344fa, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e2a2a529f4af4e01b9442c70cc5d47a3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/333c5f8488d0457c9b04488b7ba6fe54, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d19abd991f084a37bfbba9a71f90dbf1, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a40b2bec98cb4c2b8a113862d051afef, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/bb58fb4a14b4471599bbe79c21c6dbab, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/9de8c476983248efbb99bc0edfb874a8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/1631abbcdbb84e4da6007f7d96113987, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c222219510cf4e17939071f28b30e023, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a6082630179d4e27bcd44c353e3ab108, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/5e56273cc7f942ee9d02bf24f20caa68, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/7d8bea0d8a2a4048bb1a90363b50bd73, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/61804fc8a03f4537845e708a904d4c89, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/dde001a7a4b542af97e09ba5fd8bcd2a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a6513dad992444589961581e80148a78, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6cecc4ab8db04ee38667131bce798d04, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e0a60f49d79c442c8eca80795d778c1f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/0c5bb02c99ea417fba5f4e8835558513, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/54397932bea844f4a9b3ed6b27e6c4ab, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f76c4d10e63e4cf7949e469e560104e2] to archive 2024-11-27T16:23:23,222 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-27T16:23:23,224 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e8a26740741f4b849f0fe7496b05942f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e8a26740741f4b849f0fe7496b05942f 2024-11-27T16:23:23,225 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d6c00ceab2d04782bf4824d90bbcf4f3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d6c00ceab2d04782bf4824d90bbcf4f3 2024-11-27T16:23:23,227 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6a935771f09f4745af5b04dad37d5037 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6a935771f09f4745af5b04dad37d5037 2024-11-27T16:23:23,228 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c554ac2f54624062a5216cdb33789176 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c554ac2f54624062a5216cdb33789176 2024-11-27T16:23:23,229 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/1ef4213d8d6b4d3fb57a08cd37e46dd8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/1ef4213d8d6b4d3fb57a08cd37e46dd8 2024-11-27T16:23:23,230 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/5b25aac186af46c48d8dbce3fa2f4ab8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/5b25aac186af46c48d8dbce3fa2f4ab8 2024-11-27T16:23:23,231 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/202a5835ff2347a094a39485e09d0d05 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/202a5835ff2347a094a39485e09d0d05 2024-11-27T16:23:23,233 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/abd2ccbf670c47f89a02dea2886333a8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/abd2ccbf670c47f89a02dea2886333a8 2024-11-27T16:23:23,234 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/8ceff44c27d64b97b7c6930348021134 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/8ceff44c27d64b97b7c6930348021134 2024-11-27T16:23:23,235 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/027b19a0d4854dd49aa8c5c779731eba to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/027b19a0d4854dd49aa8c5c779731eba 2024-11-27T16:23:23,236 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/173e5aa140f34d1b96fa583fb7d873f7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/173e5aa140f34d1b96fa583fb7d873f7 2024-11-27T16:23:23,237 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d04b564a13ca4e579fe1a63d84755b95 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d04b564a13ca4e579fe1a63d84755b95 2024-11-27T16:23:23,238 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/bf69d9ce36f4465596589255c77d53d0 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/bf69d9ce36f4465596589255c77d53d0 2024-11-27T16:23:23,240 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a9eb5eb61caa4d80b3fd908872a17c8e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a9eb5eb61caa4d80b3fd908872a17c8e 2024-11-27T16:23:23,241 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/b10e0d8cedc54ea3897de9a22d060719 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/b10e0d8cedc54ea3897de9a22d060719 2024-11-27T16:23:23,242 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f05cd7f7472549b0b9cf09365ce5125a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f05cd7f7472549b0b9cf09365ce5125a 2024-11-27T16:23:23,243 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/01c66ad47c024f4cab1cf14f7b8dee47 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/01c66ad47c024f4cab1cf14f7b8dee47 2024-11-27T16:23:23,244 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/ed34516e66b6428bbd1d8e513f925009 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/ed34516e66b6428bbd1d8e513f925009 2024-11-27T16:23:23,245 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/faffc142ce0940168c65558d0c4a1e73 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/faffc142ce0940168c65558d0c4a1e73 2024-11-27T16:23:23,246 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/33939db86a7c4d59a6de3f1599b59ca8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/33939db86a7c4d59a6de3f1599b59ca8 2024-11-27T16:23:23,247 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a50eeb12c4ce4f9598ccdf3d679eb990 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a50eeb12c4ce4f9598ccdf3d679eb990 2024-11-27T16:23:23,248 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2091a4c80c6b4793acb1641a1364d922 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2091a4c80c6b4793acb1641a1364d922 2024-11-27T16:23:23,250 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2f4b913bddc74ea39c7b610cd249b1c5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2f4b913bddc74ea39c7b610cd249b1c5 2024-11-27T16:23:23,251 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/fc8d06d6fd24444ea2dbcd806a315618 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/fc8d06d6fd24444ea2dbcd806a315618 2024-11-27T16:23:23,252 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/8e7aab86d4b048adb6adf190956faadf to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/8e7aab86d4b048adb6adf190956faadf 2024-11-27T16:23:23,253 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/32ec75c3e0974858abf81a3a714c1548 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/32ec75c3e0974858abf81a3a714c1548 2024-11-27T16:23:23,254 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2f6313c4596c4b4a987e88f797179aa5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/2f6313c4596c4b4a987e88f797179aa5 2024-11-27T16:23:23,255 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f21b191690054776ba08244a3f0429b3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f21b191690054776ba08244a3f0429b3 2024-11-27T16:23:23,256 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6a34adcb00a6486a92bc6b36938344fa to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6a34adcb00a6486a92bc6b36938344fa 2024-11-27T16:23:23,257 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e2a2a529f4af4e01b9442c70cc5d47a3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e2a2a529f4af4e01b9442c70cc5d47a3 2024-11-27T16:23:23,259 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/333c5f8488d0457c9b04488b7ba6fe54 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/333c5f8488d0457c9b04488b7ba6fe54 2024-11-27T16:23:23,260 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d19abd991f084a37bfbba9a71f90dbf1 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/d19abd991f084a37bfbba9a71f90dbf1 2024-11-27T16:23:23,261 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a40b2bec98cb4c2b8a113862d051afef to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a40b2bec98cb4c2b8a113862d051afef 2024-11-27T16:23:23,262 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/bb58fb4a14b4471599bbe79c21c6dbab to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/bb58fb4a14b4471599bbe79c21c6dbab 2024-11-27T16:23:23,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/9de8c476983248efbb99bc0edfb874a8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/9de8c476983248efbb99bc0edfb874a8 2024-11-27T16:23:23,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/1631abbcdbb84e4da6007f7d96113987 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/1631abbcdbb84e4da6007f7d96113987 2024-11-27T16:23:23,267 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c222219510cf4e17939071f28b30e023 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c222219510cf4e17939071f28b30e023 2024-11-27T16:23:23,269 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a6082630179d4e27bcd44c353e3ab108 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a6082630179d4e27bcd44c353e3ab108 2024-11-27T16:23:23,270 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/5e56273cc7f942ee9d02bf24f20caa68 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/5e56273cc7f942ee9d02bf24f20caa68 2024-11-27T16:23:23,272 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/7d8bea0d8a2a4048bb1a90363b50bd73 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/7d8bea0d8a2a4048bb1a90363b50bd73 2024-11-27T16:23:23,273 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/61804fc8a03f4537845e708a904d4c89 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/61804fc8a03f4537845e708a904d4c89 2024-11-27T16:23:23,274 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/dde001a7a4b542af97e09ba5fd8bcd2a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/dde001a7a4b542af97e09ba5fd8bcd2a 2024-11-27T16:23:23,276 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a6513dad992444589961581e80148a78 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/a6513dad992444589961581e80148a78 2024-11-27T16:23:23,277 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6cecc4ab8db04ee38667131bce798d04 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/6cecc4ab8db04ee38667131bce798d04 2024-11-27T16:23:23,279 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e0a60f49d79c442c8eca80795d778c1f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/e0a60f49d79c442c8eca80795d778c1f 2024-11-27T16:23:23,280 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/0c5bb02c99ea417fba5f4e8835558513 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/0c5bb02c99ea417fba5f4e8835558513 2024-11-27T16:23:23,281 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/54397932bea844f4a9b3ed6b27e6c4ab to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/54397932bea844f4a9b3ed6b27e6c4ab 2024-11-27T16:23:23,282 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f76c4d10e63e4cf7949e469e560104e2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/f76c4d10e63e4cf7949e469e560104e2 2024-11-27T16:23:23,284 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/b2bc52a29d364b23a2cdb82ec5857967, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d568b3410e7945e9893fa535cf1fef55, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2ef032029d5e43c0ba0d6d0e070f28c4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/4b84932384d84719b57dc8094bd675af, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/aa2fed1c24bc4c0086b4a6e4f571e1cc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2b9929e0e6984d6e9208ee98a0ceeba3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/dacb4904cd2b4d3b9fbebe24e24bc28d, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/0ee44db78c9845d6b1b73872ee90757f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c5c35968da64675ae2f58218f9241a5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c49febce62f4dc69e9c789364f1b733, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/536f8ee4844d407c95eeeaec941024c7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/aebfa2b0c5484d268712be576dfe8961, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2889764122b6435ca4942242bbda25ec, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/40ad39a02eb545bea4cb7160c8428f0b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f61f2d6dcc5b4279860429e7e31b2c58, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d6151e51a3c0447c902244f28370520f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6fd143f2831c4ec584f85039a5c43041, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1888098ced4d406fb3465a20238f6b32, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d7ca5b78a16b4ae688deb62684eb0747, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/0339fd9cc08045e1bffab4f8d96b322d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/9aa1588197e347659688112ad43f5d17, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/e5b45599a6e3479fb1462d100260bee8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c37a4a5a94a416fa4baefdba30ad0cb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/29b65fdf90dd47379046befca53e980d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6f8309f8431e4f1497e30fc797a9572e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/af48d16ee85e4a52bfa36f65057b74e7, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1147bbdb737d4a9e8492f5bde6d9cb23, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/37fffaf3fd224b3db62622c1c714b61a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/36ed274f8c2a4ca89c816ebb93402fdb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f04fea1417df4934a4f7c323065fb1d7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/b93592fa81e24280b32a40afe7bcc0c1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2190680471c3439baf8fc1e6064ef09b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/e79c9bd72186448aa7e33cb734901486, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6c01b22228e443d590540dd56006ce2a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/16e239afdccc444996582b6f011210ac, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/706a93df883f47b5a046cbb36dced085, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/568a3511e05f4e27b3418af17f810e37, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/fc0bb60ceebd4308a2017ba38662667b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/945bdf9c4e584f49b61d6f0da43e79e5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/c6e940daa04146d4986b5c83f861eb00, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/20b65366f0744631bf2c509cbc7c32fe, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f5981941897b46f6aae3a1694afb64de, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/8f6456ddeda74d5987f22b81c7b8d456, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/80a23444ffaa497293ff7f03f682eef7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/55ca4f84eb934bc09989cb0b1451aa35, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/7f28fefc293b4b24b7edc7c7ccbaa9ad, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/837d1242784c4fb19ae9e189569e33ca, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/5aa6157fee434248a63be95d46e13d9a] to archive 2024-11-27T16:23:23,285 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T16:23:23,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-27T16:23:23,289 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/b2bc52a29d364b23a2cdb82ec5857967 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/b2bc52a29d364b23a2cdb82ec5857967 2024-11-27T16:23:23,290 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d568b3410e7945e9893fa535cf1fef55 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d568b3410e7945e9893fa535cf1fef55 2024-11-27T16:23:23,291 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2ef032029d5e43c0ba0d6d0e070f28c4 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2ef032029d5e43c0ba0d6d0e070f28c4 2024-11-27T16:23:23,292 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/4b84932384d84719b57dc8094bd675af to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/4b84932384d84719b57dc8094bd675af 2024-11-27T16:23:23,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/aa2fed1c24bc4c0086b4a6e4f571e1cc to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/aa2fed1c24bc4c0086b4a6e4f571e1cc 2024-11-27T16:23:23,294 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2b9929e0e6984d6e9208ee98a0ceeba3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2b9929e0e6984d6e9208ee98a0ceeba3 2024-11-27T16:23:23,295 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/dacb4904cd2b4d3b9fbebe24e24bc28d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/dacb4904cd2b4d3b9fbebe24e24bc28d 2024-11-27T16:23:23,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/0ee44db78c9845d6b1b73872ee90757f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/0ee44db78c9845d6b1b73872ee90757f 2024-11-27T16:23:23,297 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c5c35968da64675ae2f58218f9241a5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c5c35968da64675ae2f58218f9241a5 2024-11-27T16:23:23,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c49febce62f4dc69e9c789364f1b733 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c49febce62f4dc69e9c789364f1b733 2024-11-27T16:23:23,300 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/536f8ee4844d407c95eeeaec941024c7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/536f8ee4844d407c95eeeaec941024c7 2024-11-27T16:23:23,301 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/aebfa2b0c5484d268712be576dfe8961 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/aebfa2b0c5484d268712be576dfe8961 2024-11-27T16:23:23,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2889764122b6435ca4942242bbda25ec to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2889764122b6435ca4942242bbda25ec 2024-11-27T16:23:23,303 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/40ad39a02eb545bea4cb7160c8428f0b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/40ad39a02eb545bea4cb7160c8428f0b 2024-11-27T16:23:23,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f61f2d6dcc5b4279860429e7e31b2c58 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f61f2d6dcc5b4279860429e7e31b2c58 2024-11-27T16:23:23,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d6151e51a3c0447c902244f28370520f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d6151e51a3c0447c902244f28370520f 2024-11-27T16:23:23,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6fd143f2831c4ec584f85039a5c43041 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6fd143f2831c4ec584f85039a5c43041 2024-11-27T16:23:23,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1888098ced4d406fb3465a20238f6b32 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1888098ced4d406fb3465a20238f6b32 2024-11-27T16:23:23,309 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d7ca5b78a16b4ae688deb62684eb0747 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/d7ca5b78a16b4ae688deb62684eb0747 2024-11-27T16:23:23,310 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/0339fd9cc08045e1bffab4f8d96b322d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/0339fd9cc08045e1bffab4f8d96b322d 2024-11-27T16:23:23,311 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/9aa1588197e347659688112ad43f5d17 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/9aa1588197e347659688112ad43f5d17 2024-11-27T16:23:23,312 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/e5b45599a6e3479fb1462d100260bee8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/e5b45599a6e3479fb1462d100260bee8 2024-11-27T16:23:23,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c37a4a5a94a416fa4baefdba30ad0cb to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1c37a4a5a94a416fa4baefdba30ad0cb 2024-11-27T16:23:23,314 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/29b65fdf90dd47379046befca53e980d to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/29b65fdf90dd47379046befca53e980d 2024-11-27T16:23:23,316 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6f8309f8431e4f1497e30fc797a9572e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6f8309f8431e4f1497e30fc797a9572e 2024-11-27T16:23:23,318 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/af48d16ee85e4a52bfa36f65057b74e7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/af48d16ee85e4a52bfa36f65057b74e7 2024-11-27T16:23:23,320 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1147bbdb737d4a9e8492f5bde6d9cb23 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/1147bbdb737d4a9e8492f5bde6d9cb23 2024-11-27T16:23:23,321 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/37fffaf3fd224b3db62622c1c714b61a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/37fffaf3fd224b3db62622c1c714b61a 2024-11-27T16:23:23,322 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/36ed274f8c2a4ca89c816ebb93402fdb to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/36ed274f8c2a4ca89c816ebb93402fdb 2024-11-27T16:23:23,323 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f04fea1417df4934a4f7c323065fb1d7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f04fea1417df4934a4f7c323065fb1d7 2024-11-27T16:23:23,324 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/b93592fa81e24280b32a40afe7bcc0c1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/b93592fa81e24280b32a40afe7bcc0c1 2024-11-27T16:23:23,325 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2190680471c3439baf8fc1e6064ef09b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/2190680471c3439baf8fc1e6064ef09b 2024-11-27T16:23:23,326 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/e79c9bd72186448aa7e33cb734901486 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/e79c9bd72186448aa7e33cb734901486 2024-11-27T16:23:23,328 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6c01b22228e443d590540dd56006ce2a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/6c01b22228e443d590540dd56006ce2a 2024-11-27T16:23:23,329 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/16e239afdccc444996582b6f011210ac to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/16e239afdccc444996582b6f011210ac 2024-11-27T16:23:23,330 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/706a93df883f47b5a046cbb36dced085 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/706a93df883f47b5a046cbb36dced085 2024-11-27T16:23:23,331 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/568a3511e05f4e27b3418af17f810e37 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/568a3511e05f4e27b3418af17f810e37 2024-11-27T16:23:23,332 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/fc0bb60ceebd4308a2017ba38662667b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/fc0bb60ceebd4308a2017ba38662667b 2024-11-27T16:23:23,333 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/945bdf9c4e584f49b61d6f0da43e79e5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/945bdf9c4e584f49b61d6f0da43e79e5 2024-11-27T16:23:23,334 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/c6e940daa04146d4986b5c83f861eb00 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/c6e940daa04146d4986b5c83f861eb00 2024-11-27T16:23:23,335 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/20b65366f0744631bf2c509cbc7c32fe to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/20b65366f0744631bf2c509cbc7c32fe 2024-11-27T16:23:23,336 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f5981941897b46f6aae3a1694afb64de to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/f5981941897b46f6aae3a1694afb64de 2024-11-27T16:23:23,338 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/8f6456ddeda74d5987f22b81c7b8d456 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/8f6456ddeda74d5987f22b81c7b8d456 2024-11-27T16:23:23,339 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/80a23444ffaa497293ff7f03f682eef7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/80a23444ffaa497293ff7f03f682eef7 2024-11-27T16:23:23,340 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/55ca4f84eb934bc09989cb0b1451aa35 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/55ca4f84eb934bc09989cb0b1451aa35 2024-11-27T16:23:23,341 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/7f28fefc293b4b24b7edc7c7ccbaa9ad to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/7f28fefc293b4b24b7edc7c7ccbaa9ad 2024-11-27T16:23:23,343 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/837d1242784c4fb19ae9e189569e33ca to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/837d1242784c4fb19ae9e189569e33ca 2024-11-27T16:23:23,344 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/5aa6157fee434248a63be95d46e13d9a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/5aa6157fee434248a63be95d46e13d9a 2024-11-27T16:23:23,346 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bc09cf13db674d4999d215696716ec65, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/9b5e200e5cca4031ac8f0b1cf6b9ef83, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a87e27cf2302473fad06a9e2caabb5c5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/73e7961fa2094c2286e3b553d2a4fd72, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cf43fa8566dc4feab1b584de59cd4db9, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/d3f30e7e760a4225b9c2d3b2599c943e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/4be17d9b935745c893c0d2615defaba3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/27a6d4aae8904a31a0f5a5c2c19fafdc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/8e9827543ccf4ab291448b32af8e18c0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/db9576b0a23c42a6a00a197db4a989c5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/19dd11dbe9db49de8303b7cfb00240d3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2d9bb4bd29d1480f9a8fba2008205144, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/1046c5ead2e841f688deba7486999267, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/20faed288a4c4b2aae624e73c58c80e2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/314d9b4e2cfc40aaa2cf314f680ffc33, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/43d1ef25bef2444dba94850911364d33, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bf08b6c45de542c5bc5a4303df36b7af, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cab721fa76c04a3db7a5ef73194c2bf1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/3441ad56dc0044d4badd5e09d7e6dcff, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/aa00dc7007ad490298c8a020931bd42e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/325e959dab6a410d824f264134169cf5, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/8ebf6f1914084a51aeb9fe8fe02c5faf, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a0052fb15f9c469fa44cf1350f650d12, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/6699b72357a945aca96020e22129b03e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f0741e447a0f4104b392c62fe4ced00c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a80c895dc24f408080ccec613b2c533a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/59c38ccfa5b74759a91333b880d512bd, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7437d1c3b25b4f3c968a8595076467e4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2a90cc06e1754d31b1d2ef5b9c499b1d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/43e05408466d42569a4e8051dee0e621, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/529e2e8e231d4e1b9abc39612abf5e42, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7186d5fde2244e1b9816b82de79900ce, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2950bb6aa7d141dd9dd46481ad5ab175, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/14dcbb8ddfe748dd9543842772e16fd1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/ee7b1752fb6e46a6a9021f9377602447, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a44ec08f2bd541e396deef15fc6925bc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f7922bce7a4c4289bca4fd4d6493222c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/1d86837c25774122a5ac89e65672bab7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/161d1ae182084c189a52c45551d13ffe, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cff6fe14ca2e4aad91ea331c79c83f53, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/72ff5ca082474f7ba52cd96a33d75f08, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cbdc448e2e984a488a9f943abfdfa663, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bec4f1a459554817823b24840daa24c6, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/e2f7f49962114a34b9a936431c208744, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f41a89400ed345ceb89edeb17d0fd77a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7b06a1b9108e417fb62cbb7071334a80, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/9fd47d61c04c4dc5a651fd28b82f2efc, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/097ce23af4c74fe0b4cb7ab13f7330f8] to archive 2024-11-27T16:23:23,347 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T16:23:23,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bc09cf13db674d4999d215696716ec65 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bc09cf13db674d4999d215696716ec65 2024-11-27T16:23:23,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/9b5e200e5cca4031ac8f0b1cf6b9ef83 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/9b5e200e5cca4031ac8f0b1cf6b9ef83 2024-11-27T16:23:23,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a87e27cf2302473fad06a9e2caabb5c5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a87e27cf2302473fad06a9e2caabb5c5 2024-11-27T16:23:23,351 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/73e7961fa2094c2286e3b553d2a4fd72 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/73e7961fa2094c2286e3b553d2a4fd72 2024-11-27T16:23:23,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cf43fa8566dc4feab1b584de59cd4db9 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cf43fa8566dc4feab1b584de59cd4db9 2024-11-27T16:23:23,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/d3f30e7e760a4225b9c2d3b2599c943e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/d3f30e7e760a4225b9c2d3b2599c943e 2024-11-27T16:23:23,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/4be17d9b935745c893c0d2615defaba3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/4be17d9b935745c893c0d2615defaba3 2024-11-27T16:23:23,356 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/27a6d4aae8904a31a0f5a5c2c19fafdc to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/27a6d4aae8904a31a0f5a5c2c19fafdc 2024-11-27T16:23:23,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/8e9827543ccf4ab291448b32af8e18c0 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/8e9827543ccf4ab291448b32af8e18c0 2024-11-27T16:23:23,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/db9576b0a23c42a6a00a197db4a989c5 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/db9576b0a23c42a6a00a197db4a989c5 2024-11-27T16:23:23,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/19dd11dbe9db49de8303b7cfb00240d3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/19dd11dbe9db49de8303b7cfb00240d3 2024-11-27T16:23:23,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2d9bb4bd29d1480f9a8fba2008205144 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2d9bb4bd29d1480f9a8fba2008205144 2024-11-27T16:23:23,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/1046c5ead2e841f688deba7486999267 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/1046c5ead2e841f688deba7486999267 2024-11-27T16:23:23,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/20faed288a4c4b2aae624e73c58c80e2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/20faed288a4c4b2aae624e73c58c80e2 2024-11-27T16:23:23,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/314d9b4e2cfc40aaa2cf314f680ffc33 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/314d9b4e2cfc40aaa2cf314f680ffc33 2024-11-27T16:23:23,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/43d1ef25bef2444dba94850911364d33 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/43d1ef25bef2444dba94850911364d33 2024-11-27T16:23:23,366 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bf08b6c45de542c5bc5a4303df36b7af to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bf08b6c45de542c5bc5a4303df36b7af 2024-11-27T16:23:23,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cab721fa76c04a3db7a5ef73194c2bf1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cab721fa76c04a3db7a5ef73194c2bf1 2024-11-27T16:23:23,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/3441ad56dc0044d4badd5e09d7e6dcff to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/3441ad56dc0044d4badd5e09d7e6dcff 2024-11-27T16:23:23,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/aa00dc7007ad490298c8a020931bd42e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/aa00dc7007ad490298c8a020931bd42e 2024-11-27T16:23:23,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/325e959dab6a410d824f264134169cf5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/325e959dab6a410d824f264134169cf5 2024-11-27T16:23:23,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/8ebf6f1914084a51aeb9fe8fe02c5faf to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/8ebf6f1914084a51aeb9fe8fe02c5faf 2024-11-27T16:23:23,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a0052fb15f9c469fa44cf1350f650d12 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a0052fb15f9c469fa44cf1350f650d12 2024-11-27T16:23:23,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/6699b72357a945aca96020e22129b03e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/6699b72357a945aca96020e22129b03e 2024-11-27T16:23:23,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f0741e447a0f4104b392c62fe4ced00c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f0741e447a0f4104b392c62fe4ced00c 2024-11-27T16:23:23,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a80c895dc24f408080ccec613b2c533a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a80c895dc24f408080ccec613b2c533a 2024-11-27T16:23:23,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/59c38ccfa5b74759a91333b880d512bd to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/59c38ccfa5b74759a91333b880d512bd 2024-11-27T16:23:23,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7437d1c3b25b4f3c968a8595076467e4 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7437d1c3b25b4f3c968a8595076467e4 2024-11-27T16:23:23,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2a90cc06e1754d31b1d2ef5b9c499b1d to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2a90cc06e1754d31b1d2ef5b9c499b1d 2024-11-27T16:23:23,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/43e05408466d42569a4e8051dee0e621 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/43e05408466d42569a4e8051dee0e621 2024-11-27T16:23:23,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/529e2e8e231d4e1b9abc39612abf5e42 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/529e2e8e231d4e1b9abc39612abf5e42 2024-11-27T16:23:23,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7186d5fde2244e1b9816b82de79900ce to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7186d5fde2244e1b9816b82de79900ce 2024-11-27T16:23:23,384 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2950bb6aa7d141dd9dd46481ad5ab175 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/2950bb6aa7d141dd9dd46481ad5ab175 2024-11-27T16:23:23,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/14dcbb8ddfe748dd9543842772e16fd1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/14dcbb8ddfe748dd9543842772e16fd1 2024-11-27T16:23:23,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/ee7b1752fb6e46a6a9021f9377602447 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/ee7b1752fb6e46a6a9021f9377602447 2024-11-27T16:23:23,387 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a44ec08f2bd541e396deef15fc6925bc to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/a44ec08f2bd541e396deef15fc6925bc 2024-11-27T16:23:23,388 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f7922bce7a4c4289bca4fd4d6493222c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f7922bce7a4c4289bca4fd4d6493222c 2024-11-27T16:23:23,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/1d86837c25774122a5ac89e65672bab7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/1d86837c25774122a5ac89e65672bab7 2024-11-27T16:23:23,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/161d1ae182084c189a52c45551d13ffe to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/161d1ae182084c189a52c45551d13ffe 2024-11-27T16:23:23,392 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cff6fe14ca2e4aad91ea331c79c83f53 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cff6fe14ca2e4aad91ea331c79c83f53 2024-11-27T16:23:23,393 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/72ff5ca082474f7ba52cd96a33d75f08 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/72ff5ca082474f7ba52cd96a33d75f08 2024-11-27T16:23:23,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cbdc448e2e984a488a9f943abfdfa663 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/cbdc448e2e984a488a9f943abfdfa663 2024-11-27T16:23:23,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bec4f1a459554817823b24840daa24c6 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/bec4f1a459554817823b24840daa24c6 2024-11-27T16:23:23,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/e2f7f49962114a34b9a936431c208744 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/e2f7f49962114a34b9a936431c208744 2024-11-27T16:23:23,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f41a89400ed345ceb89edeb17d0fd77a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f41a89400ed345ceb89edeb17d0fd77a 2024-11-27T16:23:23,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7b06a1b9108e417fb62cbb7071334a80 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/7b06a1b9108e417fb62cbb7071334a80 2024-11-27T16:23:23,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/9fd47d61c04c4dc5a651fd28b82f2efc to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/9fd47d61c04c4dc5a651fd28b82f2efc 2024-11-27T16:23:23,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/097ce23af4c74fe0b4cb7ab13f7330f8 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/097ce23af4c74fe0b4cb7ab13f7330f8 2024-11-27T16:23:23,406 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/recovered.edits/704.seqid, newMaxSeqId=704, maxSeqId=1 2024-11-27T16:23:23,407 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172. 2024-11-27T16:23:23,407 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1635): Region close journal for 00f170535dc2739662302d98f22dc172: 2024-11-27T16:23:23,408 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(170): Closed 00f170535dc2739662302d98f22dc172 2024-11-27T16:23:23,408 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=00f170535dc2739662302d98f22dc172, regionState=CLOSED 2024-11-27T16:23:23,411 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-27T16:23:23,411 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; CloseRegionProcedure 00f170535dc2739662302d98f22dc172, server=7b191dec6496,44169,1732724452967 in 1.2180 sec 2024-11-27T16:23:23,412 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-11-27T16:23:23,412 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=00f170535dc2739662302d98f22dc172, UNASSIGN in 1.2220 sec 2024-11-27T16:23:23,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-27T16:23:23,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.2250 sec 2024-11-27T16:23:23,415 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724603415"}]},"ts":"1732724603415"} 2024-11-27T16:23:23,416 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-27T16:23:23,418 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-27T16:23:23,420 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.2420 sec 2024-11-27T16:23:24,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-27T16:23:24,287 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-27T16:23:24,287 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$5(2505): 
Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-27T16:23:24,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:24,289 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=148, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:24,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-27T16:23:24,290 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=148, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:24,291 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172 2024-11-27T16:23:24,294 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/recovered.edits] 2024-11-27T16:23:24,296 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/041fe1207c1b4b2cb74d3d079d82c73e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/041fe1207c1b4b2cb74d3d079d82c73e 2024-11-27T16:23:24,297 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/53c1fafcefc94091b09c88ce0de6bad0 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/53c1fafcefc94091b09c88ce0de6bad0 2024-11-27T16:23:24,299 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c4f5571043854c39af2b3f7fb36ecebd to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/A/c4f5571043854c39af2b3f7fb36ecebd 2024-11-27T16:23:24,301 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/661cb39e5f3845b492b6f204c8d6b732 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/661cb39e5f3845b492b6f204c8d6b732 2024-11-27T16:23:24,302 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/839b0f3fb2824177ba01c4ab774d2d19 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/839b0f3fb2824177ba01c4ab774d2d19 2024-11-27T16:23:24,303 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/cf71af47a8ed4c85b1f4cc60d89b40b2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/B/cf71af47a8ed4c85b1f4cc60d89b40b2 2024-11-27T16:23:24,305 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/d1881ac599684ef1813767c0209f0509 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/d1881ac599684ef1813767c0209f0509 2024-11-27T16:23:24,306 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f7084a6a6d2b4e86b1d933200e45731c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/C/f7084a6a6d2b4e86b1d933200e45731c 2024-11-27T16:23:24,310 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/recovered.edits/704.seqid to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172/recovered.edits/704.seqid 2024-11-27T16:23:24,311 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/00f170535dc2739662302d98f22dc172 2024-11-27T16:23:24,311 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-27T16:23:24,313 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=148, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:24,315 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-27T16:23:24,317 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-27T16:23:24,318 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=148, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:24,318 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-27T16:23:24,318 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732724604318"}]},"ts":"9223372036854775807"} 2024-11-27T16:23:24,320 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-27T16:23:24,320 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 00f170535dc2739662302d98f22dc172, NAME => 'TestAcidGuarantees,,1732724580903.00f170535dc2739662302d98f22dc172.', STARTKEY => '', ENDKEY => ''}] 2024-11-27T16:23:24,320 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-27T16:23:24,320 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732724604320"}]},"ts":"9223372036854775807"} 2024-11-27T16:23:24,325 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-27T16:23:24,327 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=148, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:24,328 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 40 msec 2024-11-27T16:23:24,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-27T16:23:24,390 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-27T16:23:24,403 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=239 (was 239), OpenFileDescriptor=450 (was 451), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=831 (was 786) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3799 (was 3846) 2024-11-27T16:23:24,413 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=239, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=831, ProcessCount=11, AvailableMemoryMB=3798 2024-11-27T16:23:24,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
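The entries above trace DeleteTableProcedure pid=148 end to end: the A/B/C store files of region 00f170535dc2739662302d98f22dc172 are moved to the archive directory, the region row and table state are removed from hbase:meta, and the procedure finishes in 40 msec while the client keeps polling the master with "Checking to see if procedure is done". As a hedged illustration only (assumed client code, not taken from the test itself), an equivalent delete could be issued through the standard HBase 2.x Admin API:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees"); // table name taken from the log above
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);  // a table must be disabled before it can be deleted
        }
        // Blocks until the master-side DeleteTableProcedure (cf. pid=148 above) completes;
        // the client polls the master while waiting, which is the
        // "Checking to see if procedure is done" traffic seen in the log.
        admin.deleteTable(table);
      }
    }
  }
}
```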
2024-11-27T16:23:24,415 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T16:23:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=149, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:24,417 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T16:23:24,417 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:24,417 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 149 2024-11-27T16:23:24,417 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T16:23:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-27T16:23:24,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742476_1652 (size=960) 2024-11-27T16:23:24,425 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59 2024-11-27T16:23:24,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742477_1653 (size=53) 2024-11-27T16:23:24,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-27T16:23:24,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-27T16:23:24,832 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:23:24,832 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 771007c821cc85e485653aceb22dba4b, disabling compactions & flushes 2024-11-27T16:23:24,832 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:24,832 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:24,832 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. after waiting 0 ms 2024-11-27T16:23:24,832 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:24,832 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:24,832 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:24,833 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T16:23:24,833 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732724604833"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732724604833"}]},"ts":"1732724604833"} 2024-11-27T16:23:24,835 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
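The HMaster$4 entry above prints the schema used to recreate 'TestAcidGuarantees' for testMobGetAtomicity: table metadata 'hbase.hregion.compacting.memstore.type' => 'BASIC' plus three identical families A, B and C (1 version, ROW bloom filter, no compression, 64 KB blocks), and the TableDescriptorChecker warning points at the deliberately tiny 131072-byte memstore flush size. A minimal sketch of how such a descriptor could be assembled with the 2.x builder API, under the assumption of ordinary client code (only the table name, family names and values come from the log):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTestTable {
  private static ColumnFamilyDescriptor family(String name) {
    // Matches the per-family settings printed by the master: 1 version, ROW bloom filter,
    // no compression, 64 KB blocks, block cache enabled.
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setCompressionType(Compression.Algorithm.NONE)
        .setBlocksize(64 * 1024)
        .setBlockCacheEnabled(true)
        .build();
  }

  static TableDescriptor descriptor() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // Table-level metadata seen in the log; selects the BASIC compacting memstore.
        .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
        // 131072 bytes, i.e. the value that triggers the MEMSTORE_FLUSHSIZE warning above.
        .setMemStoreFlushSize(131072L)
        .setColumnFamily(family("A"))
        .setColumnFamily(family("B"))
        .setColumnFamily(family("C"))
        .build();
  }

  static void create(Admin admin) throws java.io.IOException {
    admin.createTable(descriptor()); // the master runs a CreateTableProcedure like pid=149 above
  }
}
```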
2024-11-27T16:23:24,835 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T16:23:24,836 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724604835"}]},"ts":"1732724604835"} 2024-11-27T16:23:24,836 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-27T16:23:24,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=771007c821cc85e485653aceb22dba4b, ASSIGN}] 2024-11-27T16:23:24,842 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=771007c821cc85e485653aceb22dba4b, ASSIGN 2024-11-27T16:23:24,842 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=771007c821cc85e485653aceb22dba4b, ASSIGN; state=OFFLINE, location=7b191dec6496,44169,1732724452967; forceNewPlan=false, retain=false 2024-11-27T16:23:24,993 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=771007c821cc85e485653aceb22dba4b, regionState=OPENING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:23:24,994 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; OpenRegionProcedure 771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:23:25,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-27T16:23:25,146 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:25,149 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:25,149 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} 2024-11-27T16:23:25,149 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:25,149 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:23:25,150 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:25,150 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:25,151 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:25,152 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:23:25,152 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 771007c821cc85e485653aceb22dba4b columnFamilyName A 2024-11-27T16:23:25,153 DEBUG [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:25,154 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.HStore(327): Store=771007c821cc85e485653aceb22dba4b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:23:25,154 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:25,155 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:23:25,155 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 771007c821cc85e485653aceb22dba4b columnFamilyName B 2024-11-27T16:23:25,155 DEBUG [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:25,155 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.HStore(327): Store=771007c821cc85e485653aceb22dba4b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:23:25,156 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:25,157 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:23:25,157 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 771007c821cc85e485653aceb22dba4b columnFamilyName C 2024-11-27T16:23:25,157 DEBUG [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:25,157 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.HStore(327): Store=771007c821cc85e485653aceb22dba4b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:23:25,157 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:25,158 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:25,158 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:25,160 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T16:23:25,162 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:25,164 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T16:23:25,164 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened 771007c821cc85e485653aceb22dba4b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74740678, jitterRate=0.11372289061546326}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T16:23:25,165 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:25,166 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., pid=151, masterSystemTime=1732724605146 2024-11-27T16:23:25,167 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:25,167 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
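Each store opened above is backed by a CompactingMemStore with compactor=BASIC and a 2.00 MB in-memory flush threshold, which is what the table-level 'hbase.hregion.compacting.memstore.type' metadata selects. Purely as an assumed alternative, not something this test does, the same policy can also be requested per column family through the public builder API:

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class BasicInMemoryCompaction {
  // Per-family alternative to the table-level metadata seen in the log:
  // ask for the BASIC in-memory compaction policy on column family A directly.
  static ColumnFamilyDescriptor familyA() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
        .build();
  }
}
```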
2024-11-27T16:23:25,168 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=771007c821cc85e485653aceb22dba4b, regionState=OPEN, openSeqNum=2, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:23:25,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-27T16:23:25,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; OpenRegionProcedure 771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 in 175 msec 2024-11-27T16:23:25,172 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-11-27T16:23:25,172 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=771007c821cc85e485653aceb22dba4b, ASSIGN in 329 msec 2024-11-27T16:23:25,173 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T16:23:25,173 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724605173"}]},"ts":"1732724605173"} 2024-11-27T16:23:25,174 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-27T16:23:25,177 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T16:23:25,179 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 762 msec 2024-11-27T16:23:25,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-27T16:23:25,521 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 149 completed 2024-11-27T16:23:25,523 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x366de26d to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6364386e 2024-11-27T16:23:25,533 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@582ec26d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:25,537 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:25,538 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49344, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:25,539 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T16:23:25,540 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58244, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T16:23:25,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-27T16:23:25,542 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T16:23:25,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:25,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742478_1654 (size=996) 2024-11-27T16:23:25,555 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-27T16:23:25,555 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-27T16:23:25,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T16:23:25,563 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=771007c821cc85e485653aceb22dba4b, REOPEN/MOVE}] 2024-11-27T16:23:25,564 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=771007c821cc85e485653aceb22dba4b, REOPEN/MOVE 2024-11-27T16:23:25,565 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=771007c821cc85e485653aceb22dba4b, regionState=CLOSING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:23:25,566 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T16:23:25,566 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; CloseRegionProcedure 771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:23:25,717 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:25,717 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(124): Close 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:25,718 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T16:23:25,718 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1681): Closing 771007c821cc85e485653aceb22dba4b, disabling compactions & flushes 2024-11-27T16:23:25,718 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:25,718 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:25,718 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. after waiting 0 ms 2024-11-27T16:23:25,718 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
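ModifyTableProcedure pid=152 above rewrites the table descriptor so that family A becomes a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), after which ReopenTableRegionsProcedure closes and reopens region 771007c821cc85e485653aceb22dba4b, as the surrounding entries show. A hedged sketch of an equivalent client-side change (assumed code; only the table name, family name and threshold are taken from the log):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class EnableMobOnFamilyA {
  static void enableMob(Admin admin) throws java.io.IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(table);

    // Rebuild family 'A' with MOB enabled and a 4-byte threshold, mirroring the
    // 'IS_MOB => true, MOB_THRESHOLD => 4' descriptor printed by the master.
    ColumnFamilyDescriptor mobA =
        ColumnFamilyDescriptorBuilder.newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
            .setMobEnabled(true)
            .setMobThreshold(4L)
            .build();

    // Submits a ModifyTableProcedure; the master then reopens the table's regions,
    // which is the REOPEN/MOVE transition visible in the log.
    admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(mobA)
        .build());
  }
}
```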
2024-11-27T16:23:25,721 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-27T16:23:25,722 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:25,722 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1635): Region close journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:25,722 WARN [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegionServer(3786): Not adding moved region record: 771007c821cc85e485653aceb22dba4b to self. 2024-11-27T16:23:25,723 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(170): Closed 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:25,724 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=771007c821cc85e485653aceb22dba4b, regionState=CLOSED 2024-11-27T16:23:25,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-27T16:23:25,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; CloseRegionProcedure 771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 in 159 msec 2024-11-27T16:23:25,726 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=771007c821cc85e485653aceb22dba4b, REOPEN/MOVE; state=CLOSED, location=7b191dec6496,44169,1732724452967; forceNewPlan=false, retain=true 2024-11-27T16:23:25,877 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=771007c821cc85e485653aceb22dba4b, regionState=OPENING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:23:25,878 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=154, state=RUNNABLE; OpenRegionProcedure 771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:23:26,030 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,033 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:26,033 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7285): Opening region: {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} 2024-11-27T16:23:26,033 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:26,033 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T16:23:26,034 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7327): checking encryption for 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:26,034 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7330): checking classloading for 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:26,035 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:26,036 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:23:26,036 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 771007c821cc85e485653aceb22dba4b columnFamilyName A 2024-11-27T16:23:26,038 DEBUG [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:26,038 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.HStore(327): Store=771007c821cc85e485653aceb22dba4b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:23:26,039 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:26,040 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:23:26,040 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 771007c821cc85e485653aceb22dba4b columnFamilyName B 2024-11-27T16:23:26,040 DEBUG [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:26,041 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.HStore(327): Store=771007c821cc85e485653aceb22dba4b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:23:26,041 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:26,041 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-27T16:23:26,041 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 771007c821cc85e485653aceb22dba4b columnFamilyName C 2024-11-27T16:23:26,041 DEBUG [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:26,043 INFO [StoreOpener-771007c821cc85e485653aceb22dba4b-1 {}] regionserver.HStore(327): Store=771007c821cc85e485653aceb22dba4b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T16:23:26,043 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:26,044 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:26,045 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:26,046 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T16:23:26,047 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1085): writing seq id for 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:26,048 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1102): Opened 771007c821cc85e485653aceb22dba4b; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67126872, jitterRate=2.683401107788086E-4}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T16:23:26,049 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1001): Region open journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:26,050 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., pid=156, masterSystemTime=1732724606030 2024-11-27T16:23:26,051 DEBUG [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:26,051 INFO [RS_OPEN_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
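Once the reopened region is back online, the test fans out a set of client connections, asks the master to flush the table (FlushTableProcedure pid=157 below), and the concurrent writers quickly trip the memstore blocking limit, which the server reports as RegionTooBusyException ("Over memstore limit=512.0 K") in the stack traces further down. A hedged sketch, with assumed helper names, of how a client could issue that flush and treat the busy signal as a retryable condition:

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class FlushAndRetry {
  static void flush(Admin admin) throws java.io.IOException {
    // Equivalent of the "flush TestAcidGuarantees" request handled by FlushTableProcedure below.
    admin.flush(TableName.valueOf("TestAcidGuarantees"));
  }

  static Put exampleCell() {
    // Row/family/qualifier shaped like the cells in the flush output below (test_row_0, A:col10).
    return new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
  }

  // Writes a single cell, backing off briefly if the region reports it is over
  // its memstore limit (the RegionTooBusyException seen in the log).
  static void putWithBackoff(Table table, Put put) throws java.io.IOException, InterruptedException {
    for (int attempt = 0; attempt < 5; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException busy) {
        Thread.sleep(100L * (attempt + 1)); // simple linear backoff for the sketch
      }
    }
    throw new java.io.IOException("region still too busy after retries");
  }
}
```

In practice the HBase client also retries such exceptions internally according to its own retry settings; the explicit catch above only makes the backoff visible for illustration.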
2024-11-27T16:23:26,052 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=771007c821cc85e485653aceb22dba4b, regionState=OPEN, openSeqNum=5, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,054 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=154 2024-11-27T16:23:26,054 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=154, state=SUCCESS; OpenRegionProcedure 771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 in 175 msec 2024-11-27T16:23:26,055 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-11-27T16:23:26,055 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=771007c821cc85e485653aceb22dba4b, REOPEN/MOVE in 491 msec 2024-11-27T16:23:26,057 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-27T16:23:26,057 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 494 msec 2024-11-27T16:23:26,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 514 msec 2024-11-27T16:23:26,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-27T16:23:26,061 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x095ad211 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4bbf3c1c 2024-11-27T16:23:26,064 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65aca2ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:26,065 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x031adbce to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31f7586d 2024-11-27T16:23:26,076 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@314e353d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:26,077 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4276b1e9 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4949adfa 2024-11-27T16:23:26,084 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@96e8e33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:26,086 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x574dd3aa 
to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53ef82c4 2024-11-27T16:23:26,092 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e8d919c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:26,093 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x29247c18 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@672325a 2024-11-27T16:23:26,101 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44b14279, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:26,101 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x205568ef to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6eb94416 2024-11-27T16:23:26,120 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3395eba8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:26,122 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c6fde8c to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3852b0e3 2024-11-27T16:23:26,137 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2406c4ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:26,138 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x675cc1c7 to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4aa4b067 2024-11-27T16:23:26,146 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58970c4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:26,147 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x73e9c98b to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1ca17819 2024-11-27T16:23:26,161 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cb4faa4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:26,162 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x60507b8f to 127.0.0.1:51088 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@77a6a62c 2024-11-27T16:23:26,184 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c1c03a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T16:23:26,190 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:26,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-11-27T16:23:26,191 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:26,192 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:26,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-27T16:23:26,192 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:26,204 DEBUG [hconnection-0x709aace7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:26,205 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49348, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:26,244 DEBUG [hconnection-0x63677ca8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:26,245 DEBUG [hconnection-0xf7b902d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:26,245 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49354, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:26,245 DEBUG [hconnection-0x55b6271-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:26,246 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49364, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:26,247 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49368, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:26,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): 
Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:26,250 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:23:26,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:26,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:26,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:26,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:26,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:26,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:26,263 DEBUG [hconnection-0x3814f061-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:26,265 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49376, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:26,265 DEBUG [hconnection-0x6ee7e123-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:26,266 DEBUG [hconnection-0x690cd11c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:26,266 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49380, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:26,267 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49384, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:26,265 DEBUG [hconnection-0x4bf2f881-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:26,269 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49390, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:26,273 DEBUG [hconnection-0x28583d4a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:26,275 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49402, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T16:23:26,292 DEBUG [hconnection-0x5c81f63e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T16:23:26,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-27T16:23:26,293 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49412, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-27T16:23:26,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270a53d7b5652444cc9149786f92255ac5_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724606242/Put/seqid=0 2024-11-27T16:23:26,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724666313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49412 deadline: 1732724666314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1732724666314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49348 deadline: 1732724666316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724666314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,344 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742479_1655 (size=12154) 2024-11-27T16:23:26,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-27T16:23:26,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:26,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:26,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:26,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:26,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:26,347 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:26,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:26,352 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270a53d7b5652444cc9149786f92255ac5_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270a53d7b5652444cc9149786f92255ac5_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:26,354 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/c769303f10bd41c8adb8f7cb97f451c2, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:26,355 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/c769303f10bd41c8adb8f7cb97f451c2 is 175, key is test_row_0/A:col10/1732724606242/Put/seqid=0 2024-11-27T16:23:26,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742480_1656 (size=30955) 2024-11-27T16:23:26,391 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/c769303f10bd41c8adb8f7cb97f451c2 2024-11-27T16:23:26,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/68a11f410ea947e9b5ddbac368285266 is 50, key is test_row_0/B:col10/1732724606242/Put/seqid=0 2024-11-27T16:23:26,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724666418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49412 deadline: 1732724666420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724666420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1732724666421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49348 deadline: 1732724666421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742481_1657 (size=12001) 2024-11-27T16:23:26,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-27T16:23:26,498 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-27T16:23:26,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:26,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:26,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:26,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:26,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:26,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:26,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724666624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724666624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49348 deadline: 1732724666636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1732724666636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49412 deadline: 1732724666637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,651 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-27T16:23:26,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:26,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:26,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:26,652 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:26,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:26,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:26,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-27T16:23:26,804 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-27T16:23:26,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:26,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:26,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:26,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:26,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:26,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:26,857 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/68a11f410ea947e9b5ddbac368285266 2024-11-27T16:23:26,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724666927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724666928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1732724666942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49348 deadline: 1732724666943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:26,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49412 deadline: 1732724666944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,960 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:26,961 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-27T16:23:26,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:26,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:26,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:26,961 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:26,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:26,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:26,987 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/6f7d6b56624f4e40899d7e345b3bb59f is 50, key is test_row_0/C:col10/1732724606242/Put/seqid=0 2024-11-27T16:23:27,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742482_1658 (size=12001) 2024-11-27T16:23:27,113 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-27T16:23:27,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:27,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:27,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:27,114 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:27,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:27,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:27,267 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-27T16:23:27,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:27,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:27,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:27,268 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:27,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:27,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:27,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-27T16:23:27,420 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-27T16:23:27,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:27,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/6f7d6b56624f4e40899d7e345b3bb59f 2024-11-27T16:23:27,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:27,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:27,420 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:27,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:27,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:27,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/c769303f10bd41c8adb8f7cb97f451c2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/c769303f10bd41c8adb8f7cb97f451c2 2024-11-27T16:23:27,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/c769303f10bd41c8adb8f7cb97f451c2, entries=150, sequenceid=15, filesize=30.2 K 2024-11-27T16:23:27,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/68a11f410ea947e9b5ddbac368285266 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/68a11f410ea947e9b5ddbac368285266 2024-11-27T16:23:27,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/68a11f410ea947e9b5ddbac368285266, entries=150, sequenceid=15, filesize=11.7 K 2024-11-27T16:23:27,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/6f7d6b56624f4e40899d7e345b3bb59f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/6f7d6b56624f4e40899d7e345b3bb59f 2024-11-27T16:23:27,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:27,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724667433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:27,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724667435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/6f7d6b56624f4e40899d7e345b3bb59f, entries=150, sequenceid=15, filesize=11.7 K 2024-11-27T16:23:27,437 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 771007c821cc85e485653aceb22dba4b in 1187ms, sequenceid=15, compaction requested=false 2024-11-27T16:23:27,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:27,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:27,449 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-27T16:23:27,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:27,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:27,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:27,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:27,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:27,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:27,455 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127d6e12cc70b04452cbe0b0b3966d59787_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724607447/Put/seqid=0 2024-11-27T16:23:27,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:27,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49412 deadline: 1732724667455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742483_1659 (size=12154) 2024-11-27T16:23:27,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:27,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49348 deadline: 1732724667457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,459 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:27,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:27,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1732724667457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,462 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127d6e12cc70b04452cbe0b0b3966d59787_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127d6e12cc70b04452cbe0b0b3966d59787_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:27,463 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/6a97e462c4ff4afd8c0fe7d1a7158a95, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:27,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/6a97e462c4ff4afd8c0fe7d1a7158a95 is 175, key is test_row_0/A:col10/1732724607447/Put/seqid=0 2024-11-27T16:23:27,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742484_1660 (size=30955) 2024-11-27T16:23:27,467 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/6a97e462c4ff4afd8c0fe7d1a7158a95 2024-11-27T16:23:27,476 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/08b218d73e7546bdb57e490353da8f0d is 50, key is test_row_0/B:col10/1732724607447/Put/seqid=0 2024-11-27T16:23:27,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742485_1661 (size=12001) 2024-11-27T16:23:27,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:27,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49412 deadline: 1732724667558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:27,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49348 deadline: 1732724667560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:27,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1732724667560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,573 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,573 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-27T16:23:27,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:27,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:27,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:27,573 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:27,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:27,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:27,725 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-27T16:23:27,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:27,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
as already flushing 2024-11-27T16:23:27,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:27,726 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:27,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:27,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:27,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:27,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49412 deadline: 1732724667760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:27,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49348 deadline: 1732724667762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:27,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1732724667763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,878 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:27,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-27T16:23:27,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:27,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:27,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:27,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:27,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:27,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:27,881 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/08b218d73e7546bdb57e490353da8f0d 2024-11-27T16:23:27,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/76618a60baee4a28b4696aac925b23c8 is 50, key is test_row_0/C:col10/1732724607447/Put/seqid=0 2024-11-27T16:23:27,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742486_1662 (size=12001) 2024-11-27T16:23:28,031 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-27T16:23:28,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:28,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:28,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:28,032 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:28,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:28,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:28,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:28,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49348 deadline: 1732724668064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:28,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49412 deadline: 1732724668065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:28,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1732724668066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,184 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-27T16:23:28,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:28,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:28,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:28,185 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:28,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:28,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:28,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/76618a60baee4a28b4696aac925b23c8 2024-11-27T16:23:28,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/6a97e462c4ff4afd8c0fe7d1a7158a95 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/6a97e462c4ff4afd8c0fe7d1a7158a95 2024-11-27T16:23:28,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-27T16:23:28,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/6a97e462c4ff4afd8c0fe7d1a7158a95, entries=150, sequenceid=42, filesize=30.2 K 2024-11-27T16:23:28,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/08b218d73e7546bdb57e490353da8f0d as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/08b218d73e7546bdb57e490353da8f0d 2024-11-27T16:23:28,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/08b218d73e7546bdb57e490353da8f0d, entries=150, sequenceid=42, filesize=11.7 K 2024-11-27T16:23:28,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/76618a60baee4a28b4696aac925b23c8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/76618a60baee4a28b4696aac925b23c8 2024-11-27T16:23:28,311 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/76618a60baee4a28b4696aac925b23c8, entries=150, sequenceid=42, filesize=11.7 K 2024-11-27T16:23:28,312 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 771007c821cc85e485653aceb22dba4b in 862ms, sequenceid=42, compaction requested=false 2024-11-27T16:23:28,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:28,337 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-27T16:23:28,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:28,337 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-27T16:23:28,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:28,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:28,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:28,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:28,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:28,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:28,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127faa53db3dddf4057b66b6fa116accc8c_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724607451/Put/seqid=0 2024-11-27T16:23:28,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742487_1663 (size=12154) 2024-11-27T16:23:28,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush 
requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:28,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:28,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:28,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724668471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:28,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724668472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:28,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49348 deadline: 1732724668567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:28,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49412 deadline: 1732724668570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:28,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1732724668570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:28,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724668575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:28,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724668575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:28,761 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127faa53db3dddf4057b66b6fa116accc8c_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127faa53db3dddf4057b66b6fa116accc8c_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:28,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/f70e6599e09c4af584ccd96b67e646ea, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:28,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/f70e6599e09c4af584ccd96b67e646ea is 175, key is test_row_0/A:col10/1732724607451/Put/seqid=0 2024-11-27T16:23:28,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742488_1664 (size=30955) 2024-11-27T16:23:28,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:28,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724668779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:28,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724668779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:28,929 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-27T16:23:29,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:29,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724669082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:29,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:29,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724669083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:29,168 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/f70e6599e09c4af584ccd96b67e646ea 2024-11-27T16:23:29,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/10740185d1fc4a74aaa4923736c6cbb3 is 50, key is test_row_0/B:col10/1732724607451/Put/seqid=0 2024-11-27T16:23:29,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742489_1665 (size=12001) 2024-11-27T16:23:29,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:29,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1732724669576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:29,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:29,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49348 deadline: 1732724669578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:29,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:29,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49412 deadline: 1732724669581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:29,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:29,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724669585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:29,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:29,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724669589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:29,598 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/10740185d1fc4a74aaa4923736c6cbb3 2024-11-27T16:23:29,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/fd1900434fdb47c59b945b8c2c3c3ce2 is 50, key is test_row_0/C:col10/1732724607451/Put/seqid=0 2024-11-27T16:23:29,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742490_1666 (size=12001) 2024-11-27T16:23:30,025 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/fd1900434fdb47c59b945b8c2c3c3ce2 2024-11-27T16:23:30,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/f70e6599e09c4af584ccd96b67e646ea as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/f70e6599e09c4af584ccd96b67e646ea 2024-11-27T16:23:30,032 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/f70e6599e09c4af584ccd96b67e646ea, entries=150, sequenceid=52, filesize=30.2 K 2024-11-27T16:23:30,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/10740185d1fc4a74aaa4923736c6cbb3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/10740185d1fc4a74aaa4923736c6cbb3 2024-11-27T16:23:30,035 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/10740185d1fc4a74aaa4923736c6cbb3, entries=150, sequenceid=52, filesize=11.7 K 2024-11-27T16:23:30,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/fd1900434fdb47c59b945b8c2c3c3ce2 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/fd1900434fdb47c59b945b8c2c3c3ce2 2024-11-27T16:23:30,039 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/fd1900434fdb47c59b945b8c2c3c3ce2, entries=150, sequenceid=52, filesize=11.7 K 2024-11-27T16:23:30,039 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 771007c821cc85e485653aceb22dba4b in 1702ms, sequenceid=52, compaction requested=true 2024-11-27T16:23:30,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:30,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:30,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-11-27T16:23:30,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-11-27T16:23:30,042 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-11-27T16:23:30,042 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.8490 sec 2024-11-27T16:23:30,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 3.8520 sec 2024-11-27T16:23:30,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-27T16:23:30,298 INFO [Thread-2863 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-11-27T16:23:30,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:30,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-11-27T16:23:30,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-27T16:23:30,301 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:30,301 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:30,301 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:30,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-27T16:23:30,452 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:30,453 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-27T16:23:30,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:30,453 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-27T16:23:30,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:30,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:30,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:30,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:30,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:30,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:30,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411273d86bdf4119e436a890db5f14dc2b7f2_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724608468/Put/seqid=0 2024-11-27T16:23:30,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742491_1667 (size=12154) 2024-11-27T16:23:30,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:30,466 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411273d86bdf4119e436a890db5f14dc2b7f2_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273d86bdf4119e436a890db5f14dc2b7f2_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:30,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/50ebfba186954f0fae62d8a5193b4572, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:30,469 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/50ebfba186954f0fae62d8a5193b4572 is 175, key is test_row_0/A:col10/1732724608468/Put/seqid=0 2024-11-27T16:23:30,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742492_1668 (size=30955) 2024-11-27T16:23:30,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:30,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-27T16:23:30,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724670603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:30,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724670605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:30,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:30,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724670706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:30,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:30,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724670707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:30,873 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/50ebfba186954f0fae62d8a5193b4572 2024-11-27T16:23:30,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/db1b28dff7044aa4a7a18eac5233176c is 50, key is test_row_0/B:col10/1732724608468/Put/seqid=0 2024-11-27T16:23:30,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-27T16:23:30,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742493_1669 (size=12001) 2024-11-27T16:23:30,910 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/db1b28dff7044aa4a7a18eac5233176c 2024-11-27T16:23:30,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:30,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724670909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:30,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:30,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724670910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:30,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/1cf56d1afb2a44568b1fabaedc38ac2c is 50, key is test_row_0/C:col10/1732724608468/Put/seqid=0 2024-11-27T16:23:30,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742494_1670 (size=12001) 2024-11-27T16:23:30,922 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/1cf56d1afb2a44568b1fabaedc38ac2c 2024-11-27T16:23:30,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/50ebfba186954f0fae62d8a5193b4572 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/50ebfba186954f0fae62d8a5193b4572 2024-11-27T16:23:30,928 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/50ebfba186954f0fae62d8a5193b4572, entries=150, sequenceid=78, filesize=30.2 K 2024-11-27T16:23:30,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/db1b28dff7044aa4a7a18eac5233176c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/db1b28dff7044aa4a7a18eac5233176c 2024-11-27T16:23:30,932 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 
{event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/db1b28dff7044aa4a7a18eac5233176c, entries=150, sequenceid=78, filesize=11.7 K 2024-11-27T16:23:30,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/1cf56d1afb2a44568b1fabaedc38ac2c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/1cf56d1afb2a44568b1fabaedc38ac2c 2024-11-27T16:23:30,935 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/1cf56d1afb2a44568b1fabaedc38ac2c, entries=150, sequenceid=78, filesize=11.7 K 2024-11-27T16:23:30,935 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 771007c821cc85e485653aceb22dba4b in 482ms, sequenceid=78, compaction requested=true 2024-11-27T16:23:30,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:30,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:30,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-11-27T16:23:30,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-11-27T16:23:30,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-27T16:23:30,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 635 msec 2024-11-27T16:23:30,940 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 639 msec 2024-11-27T16:23:31,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:31,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T16:23:31,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:31,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:31,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:31,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:31,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:31,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:31,221 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275a22cf24bea04b0c85baac7d496f236f_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724610601/Put/seqid=0 2024-11-27T16:23:31,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742495_1671 (size=12154) 2024-11-27T16:23:31,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:31,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724671266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:31,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:31,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724671266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:31,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:31,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724671370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:31,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:31,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724671371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:31,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-27T16:23:31,404 INFO [Thread-2863 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-27T16:23:31,405 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:31,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-27T16:23:31,406 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:31,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-27T16:23:31,407 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:31,407 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:31,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-27T16:23:31,558 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:31,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-27T16:23:31,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:31,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:31,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:31,559 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:31,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:31,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:31,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:31,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724671573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:31,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:31,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724671575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:31,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:31,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49348 deadline: 1732724671589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:31,591 DEBUG [Thread-2857 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:23:31,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:31,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49412 deadline: 1732724671590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:31,592 DEBUG [Thread-2853 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:23:31,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:31,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1732724671591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:31,594 DEBUG [Thread-2861 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:23:31,625 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:31,629 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275a22cf24bea04b0c85baac7d496f236f_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275a22cf24bea04b0c85baac7d496f236f_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:31,629 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/68bc21f36f5848f98db23990c6183ff4, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:31,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/68bc21f36f5848f98db23990c6183ff4 is 175, key is test_row_0/A:col10/1732724610601/Put/seqid=0 2024-11-27T16:23:31,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742496_1672 (size=30955) 2024-11-27T16:23:31,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-27T16:23:31,711 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:31,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-27T16:23:31,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:31,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:31,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:31,712 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:31,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:31,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:31,864 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:31,864 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-27T16:23:31,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:31,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:31,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:31,865 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:31,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:31,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:31,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:31,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724671876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:31,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:31,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724671881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:32,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-27T16:23:32,016 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:32,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-27T16:23:32,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:32,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:32,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:32,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:32,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:32,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:32,035 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=89, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/68bc21f36f5848f98db23990c6183ff4 2024-11-27T16:23:32,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/bdcf10f210da4969baec322bf6766b23 is 50, key is test_row_0/B:col10/1732724610601/Put/seqid=0 2024-11-27T16:23:32,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742497_1673 (size=12001) 2024-11-27T16:23:32,169 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:32,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-27T16:23:32,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:32,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:32,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:32,170 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:32,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:32,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:32,322 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:32,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-27T16:23:32,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:32,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:32,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:32,322 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:32,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:32,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:32,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:32,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724672381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:32,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:32,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724672383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:32,445 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/bdcf10f210da4969baec322bf6766b23 2024-11-27T16:23:32,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/a9ff9b6cfbdf4703a428cfd836eb52e0 is 50, key is test_row_0/C:col10/1732724610601/Put/seqid=0 2024-11-27T16:23:32,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742498_1674 (size=12001) 2024-11-27T16:23:32,456 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/a9ff9b6cfbdf4703a428cfd836eb52e0 2024-11-27T16:23:32,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/68bc21f36f5848f98db23990c6183ff4 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/68bc21f36f5848f98db23990c6183ff4 2024-11-27T16:23:32,466 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/68bc21f36f5848f98db23990c6183ff4, entries=150, sequenceid=89, filesize=30.2 K 2024-11-27T16:23:32,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/bdcf10f210da4969baec322bf6766b23 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/bdcf10f210da4969baec322bf6766b23 2024-11-27T16:23:32,470 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/bdcf10f210da4969baec322bf6766b23, entries=150, sequenceid=89, filesize=11.7 K 2024-11-27T16:23:32,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/a9ff9b6cfbdf4703a428cfd836eb52e0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/a9ff9b6cfbdf4703a428cfd836eb52e0 2024-11-27T16:23:32,474 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/a9ff9b6cfbdf4703a428cfd836eb52e0, entries=150, sequenceid=89, filesize=11.7 K 2024-11-27T16:23:32,474 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:32,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-27T16:23:32,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:32,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:32,475 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 771007c821cc85e485653aceb22dba4b in 1260ms, sequenceid=89, compaction requested=true 2024-11-27T16:23:32,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:32,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:32,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:32,475 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T16:23:32,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:32,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:32,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:32,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:32,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:32,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:32,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:23:32,475 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T16:23:32,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 
'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:32,477 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 154775 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T16:23:32,477 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/A is initiating minor compaction (all files) 2024-11-27T16:23:32,477 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/A in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:32,477 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/c769303f10bd41c8adb8f7cb97f451c2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/6a97e462c4ff4afd8c0fe7d1a7158a95, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/f70e6599e09c4af584ccd96b67e646ea, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/50ebfba186954f0fae62d8a5193b4572, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/68bc21f36f5848f98db23990c6183ff4] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=151.1 K 2024-11-27T16:23:32,477 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:32,477 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/c769303f10bd41c8adb8f7cb97f451c2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/6a97e462c4ff4afd8c0fe7d1a7158a95, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/f70e6599e09c4af584ccd96b67e646ea, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/50ebfba186954f0fae62d8a5193b4572, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/68bc21f36f5848f98db23990c6183ff4] 2024-11-27T16:23:32,478 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60005 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T16:23:32,478 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting c769303f10bd41c8adb8f7cb97f451c2, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732724606242 2024-11-27T16:23:32,478 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/B is initiating minor compaction (all files) 2024-11-27T16:23:32,478 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/B in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:32,478 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/68a11f410ea947e9b5ddbac368285266, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/08b218d73e7546bdb57e490353da8f0d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/10740185d1fc4a74aaa4923736c6cbb3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/db1b28dff7044aa4a7a18eac5233176c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/bdcf10f210da4969baec322bf6766b23] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=58.6 K 2024-11-27T16:23:32,478 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a97e462c4ff4afd8c0fe7d1a7158a95, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732724606303 2024-11-27T16:23:32,478 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 68a11f410ea947e9b5ddbac368285266, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732724606242 2024-11-27T16:23:32,478 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting f70e6599e09c4af584ccd96b67e646ea, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732724607451 2024-11-27T16:23:32,478 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 08b218d73e7546bdb57e490353da8f0d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732724606303 2024-11-27T16:23:32,479 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 10740185d1fc4a74aaa4923736c6cbb3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732724607451 2024-11-27T16:23:32,479 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50ebfba186954f0fae62d8a5193b4572, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732724608468 2024-11-27T16:23:32,479 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting db1b28dff7044aa4a7a18eac5233176c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732724608468 2024-11-27T16:23:32,479 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68bc21f36f5848f98db23990c6183ff4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732724610601 2024-11-27T16:23:32,479 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting bdcf10f210da4969baec322bf6766b23, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=89, earliestPutTs=1732724610601 2024-11-27T16:23:32,487 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:32,489 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#B#compaction#582 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:32,490 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/bc48ebd121ab4a4a9a580ad5583c92a5 is 50, key is test_row_0/B:col10/1732724610601/Put/seqid=0 2024-11-27T16:23:32,491 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411273b6106535f20485fb05e97ec7ffff8d6_771007c821cc85e485653aceb22dba4b store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:32,493 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411273b6106535f20485fb05e97ec7ffff8d6_771007c821cc85e485653aceb22dba4b, store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:32,493 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411273b6106535f20485fb05e97ec7ffff8d6_771007c821cc85e485653aceb22dba4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:32,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742499_1675 (size=12173) 2024-11-27T16:23:32,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742500_1676 (size=4469) 2024-11-27T16:23:32,502 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#A#compaction#581 average throughput is 1.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:32,502 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/98f17541f0de471fb05d373c3358e32e is 175, key is test_row_0/A:col10/1732724610601/Put/seqid=0 2024-11-27T16:23:32,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742501_1677 (size=31127) 2024-11-27T16:23:32,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-27T16:23:32,627 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:32,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-27T16:23:32,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:32,627 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:23:32,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:32,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:32,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:32,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:32,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:32,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:32,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127594e8e1615354564af91972d5eb3cc31_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724611264/Put/seqid=0 2024-11-27T16:23:32,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to 
blk_1073742502_1678 (size=12154) 2024-11-27T16:23:32,901 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/bc48ebd121ab4a4a9a580ad5583c92a5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/bc48ebd121ab4a4a9a580ad5583c92a5 2024-11-27T16:23:32,905 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 771007c821cc85e485653aceb22dba4b/B of 771007c821cc85e485653aceb22dba4b into bc48ebd121ab4a4a9a580ad5583c92a5(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:32,905 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:32,905 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/B, priority=11, startTime=1732724612475; duration=0sec 2024-11-27T16:23:32,905 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:32,905 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:B 2024-11-27T16:23:32,905 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T16:23:32,907 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60005 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T16:23:32,907 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/C is initiating minor compaction (all files) 2024-11-27T16:23:32,907 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/C in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
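
The "Exploring compaction algorithm has selected 5 files of size 60005 ... after considering 6 permutations with 6 in ratio" line above refers to a ratio-based search over contiguous windows of age-ordered store files. The sketch below is a small, self-contained illustration of that idea only; it is not the HBase ExploringCompactionPolicy itself, the class and method names are invented, and the individual file sizes in main are made up so that they total the 60005 bytes reported in the log.

import java.util.ArrayList;
import java.util.List;

// Illustrative only: a stand-alone simplification of ratio-based "exploring"
// selection over contiguous windows of age-ordered store files. It is NOT the
// HBase ExploringCompactionPolicy; class/method names are invented for this sketch.
public class ExploringSelectionSketch {

    // Returns the chosen contiguous window of file sizes (possibly empty).
    static List<Long> select(List<Long> fileSizes, double ratio, int minFiles, int maxFiles) {
        List<Long> best = new ArrayList<>();
        long bestTotal = Long.MAX_VALUE;
        // The "permutations" counted in the log line are contiguous (start, length)
        // windows over the file list; with 5 files and a minimum of 3 there are 6 of them.
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + minFiles; end <= fileSizes.size() && end - start <= maxFiles; end++) {
                List<Long> candidate = fileSizes.subList(start, end);
                if (!withinRatio(candidate, ratio)) {
                    continue; // considered, but not "in ratio"
                }
                long total = candidate.stream().mapToLong(Long::longValue).sum();
                // Prefer compacting more files; break ties with the smaller total size.
                if (candidate.size() > best.size()
                        || (candidate.size() == best.size() && total < bestTotal)) {
                    best = new ArrayList<>(candidate);
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    // Ratio check: no file may be larger than ratio * (sum of the other files in the window).
    static boolean withinRatio(List<Long> candidate, double ratio) {
        long total = candidate.stream().mapToLong(Long::longValue).sum();
        for (long size : candidate) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Five flush files totalling 60005 bytes as reported above; the individual
        // sizes are made up for the example. 1.2 is the usual default compaction ratio.
        List<Long> sizes = List.of(12173L, 11988L, 11995L, 11930L, 11919L);
        System.out.println(select(sizes, 1.2, 3, 10)); // selects all five files
    }
}
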
2024-11-27T16:23:32,907 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/6f7d6b56624f4e40899d7e345b3bb59f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/76618a60baee4a28b4696aac925b23c8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/fd1900434fdb47c59b945b8c2c3c3ce2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/1cf56d1afb2a44568b1fabaedc38ac2c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/a9ff9b6cfbdf4703a428cfd836eb52e0] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=58.6 K 2024-11-27T16:23:32,908 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f7d6b56624f4e40899d7e345b3bb59f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732724606242 2024-11-27T16:23:32,908 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 76618a60baee4a28b4696aac925b23c8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732724606303 2024-11-27T16:23:32,908 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting fd1900434fdb47c59b945b8c2c3c3ce2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732724607451 2024-11-27T16:23:32,909 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cf56d1afb2a44568b1fabaedc38ac2c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732724608468 2024-11-27T16:23:32,910 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a9ff9b6cfbdf4703a428cfd836eb52e0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732724610601 2024-11-27T16:23:32,912 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/98f17541f0de471fb05d373c3358e32e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/98f17541f0de471fb05d373c3358e32e 2024-11-27T16:23:32,924 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 771007c821cc85e485653aceb22dba4b/A of 771007c821cc85e485653aceb22dba4b into 98f17541f0de471fb05d373c3358e32e(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
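
The "Committing .../.tmp/... as ..." lines above show the two-step write pattern used by both flushes and compactions: output is first written under the region's .tmp directory and only then moved into the column-family directory. Below is a minimal stand-alone sketch of that commit step against the plain Hadoop FileSystem API; it is illustrative only (TmpCommitSketch and commitStoreFile are invented names, not the HBase HRegionFileSystem code), and the main method assumes the mini-cluster NameNode from this log is still reachable.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: a minimal version of the "write to .tmp, then rename into the
// store directory" commit step visible in the Committing... lines above.
public class TmpCommitSketch {

    // Moves a finished .tmp store file into the column-family directory in one rename.
    static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
        if (!fs.exists(familyDir)) {
            fs.mkdirs(familyDir);
        }
        Path dst = new Path(familyDir, tmpFile.getName());
        // Within a single HDFS namespace the rename is an atomic metadata operation,
        // so readers never observe a half-written store file.
        if (!fs.rename(tmpFile, dst)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + dst);
        }
        return dst;
    }

    public static void main(String[] args) throws Exception {
        // Assumes the mini-cluster NameNode from this log (hdfs://localhost:34065) is reachable;
        // the paths are taken from the log lines above and serve only as placeholders.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34065"), new Configuration());
        Path tmp = new Path("/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/"
                + "data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/bc48ebd121ab4a4a9a580ad5583c92a5");
        Path familyDir = new Path("/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/"
                + "data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B");
        System.out.println("committed to " + commitStoreFile(fs, tmp, familyDir));
    }
}
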
2024-11-27T16:23:32,924 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:32,924 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/A, priority=11, startTime=1732724612475; duration=0sec 2024-11-27T16:23:32,924 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:32,924 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:A 2024-11-27T16:23:32,927 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#C#compaction#584 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:32,928 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/b17a7914226140b8b0be8c050544126c is 50, key is test_row_0/C:col10/1732724610601/Put/seqid=0 2024-11-27T16:23:32,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742503_1679 (size=12173) 2024-11-27T16:23:33,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:33,045 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127594e8e1615354564af91972d5eb3cc31_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127594e8e1615354564af91972d5eb3cc31_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:33,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/ef4d3aaa2d444600ab42e28791e7e8f0, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:33,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/ef4d3aaa2d444600ab42e28791e7e8f0 is 175, key is 
test_row_0/A:col10/1732724611264/Put/seqid=0 2024-11-27T16:23:33,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742504_1680 (size=30955) 2024-11-27T16:23:33,059 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=114, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/ef4d3aaa2d444600ab42e28791e7e8f0 2024-11-27T16:23:33,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/dc10e3811ab6475488f57fcf8d362cc1 is 50, key is test_row_0/B:col10/1732724611264/Put/seqid=0 2024-11-27T16:23:33,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742505_1681 (size=12001) 2024-11-27T16:23:33,357 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/b17a7914226140b8b0be8c050544126c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/b17a7914226140b8b0be8c050544126c 2024-11-27T16:23:33,363 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 771007c821cc85e485653aceb22dba4b/C of 771007c821cc85e485653aceb22dba4b into b17a7914226140b8b0be8c050544126c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:33,363 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:33,363 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/C, priority=11, startTime=1732724612475; duration=0sec 2024-11-27T16:23:33,364 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:33,364 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:C 2024-11-27T16:23:33,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:33,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:33,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:33,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724673413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:33,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:33,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724673415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:33,487 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/dc10e3811ab6475488f57fcf8d362cc1 2024-11-27T16:23:33,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/f8c178e51e39487f8de9e67a25267aae is 50, key is test_row_0/C:col10/1732724611264/Put/seqid=0 2024-11-27T16:23:33,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-27T16:23:33,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:33,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724673522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:33,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:33,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724673523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:33,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742506_1682 (size=12001) 2024-11-27T16:23:33,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:33,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724673725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:33,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:33,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724673726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:33,950 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/f8c178e51e39487f8de9e67a25267aae 2024-11-27T16:23:33,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/ef4d3aaa2d444600ab42e28791e7e8f0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/ef4d3aaa2d444600ab42e28791e7e8f0 2024-11-27T16:23:33,962 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/ef4d3aaa2d444600ab42e28791e7e8f0, entries=150, sequenceid=114, filesize=30.2 K 2024-11-27T16:23:33,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/dc10e3811ab6475488f57fcf8d362cc1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/dc10e3811ab6475488f57fcf8d362cc1 2024-11-27T16:23:33,969 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/dc10e3811ab6475488f57fcf8d362cc1, entries=150, sequenceid=114, filesize=11.7 K 2024-11-27T16:23:33,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/f8c178e51e39487f8de9e67a25267aae as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/f8c178e51e39487f8de9e67a25267aae 2024-11-27T16:23:33,974 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/f8c178e51e39487f8de9e67a25267aae, entries=150, sequenceid=114, filesize=11.7 K 2024-11-27T16:23:33,975 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 771007c821cc85e485653aceb22dba4b in 1348ms, sequenceid=114, compaction requested=false 2024-11-27T16:23:33,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:33,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:33,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-27T16:23:33,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-27T16:23:33,978 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-27T16:23:33,978 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5700 sec 2024-11-27T16:23:33,980 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 2.5740 sec 2024-11-27T16:23:34,033 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:23:34,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:34,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:34,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:34,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:34,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:34,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-27T16:23:34,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:34,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127685040b6b0d74871a8a097cca7437911_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724613411/Put/seqid=0 2024-11-27T16:23:34,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:34,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724674091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:34,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:34,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724674093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:34,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742507_1683 (size=12204) 2024-11-27T16:23:34,135 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:34,138 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127685040b6b0d74871a8a097cca7437911_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127685040b6b0d74871a8a097cca7437911_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:34,139 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/df89bfcd088e4a5c9dea8da7df5be53a, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:34,139 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/df89bfcd088e4a5c9dea8da7df5be53a is 175, key is test_row_0/A:col10/1732724613411/Put/seqid=0 2024-11-27T16:23:34,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742508_1684 (size=31005) 2024-11-27T16:23:34,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:34,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724674198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:34,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:34,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724674199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:34,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:34,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724674403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:34,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:34,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724674417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:34,574 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=129, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/df89bfcd088e4a5c9dea8da7df5be53a 2024-11-27T16:23:34,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/7ade542436be4518bcbccaff01b2cb2c is 50, key is test_row_0/B:col10/1732724613411/Put/seqid=0 2024-11-27T16:23:34,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742509_1685 (size=12051) 2024-11-27T16:23:34,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/7ade542436be4518bcbccaff01b2cb2c 2024-11-27T16:23:34,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/c6a8bbe7286e4a2b8cd07051015dfacb is 50, key is test_row_0/C:col10/1732724613411/Put/seqid=0 2024-11-27T16:23:34,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:34,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724674709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:34,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742510_1686 (size=12051) 2024-11-27T16:23:34,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/c6a8bbe7286e4a2b8cd07051015dfacb 2024-11-27T16:23:34,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/df89bfcd088e4a5c9dea8da7df5be53a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/df89bfcd088e4a5c9dea8da7df5be53a 2024-11-27T16:23:34,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/df89bfcd088e4a5c9dea8da7df5be53a, entries=150, sequenceid=129, filesize=30.3 K 2024-11-27T16:23:34,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:34,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/7ade542436be4518bcbccaff01b2cb2c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/7ade542436be4518bcbccaff01b2cb2c 2024-11-27T16:23:34,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724674722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:34,732 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/7ade542436be4518bcbccaff01b2cb2c, entries=150, sequenceid=129, filesize=11.8 K 2024-11-27T16:23:34,733 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/c6a8bbe7286e4a2b8cd07051015dfacb as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/c6a8bbe7286e4a2b8cd07051015dfacb 2024-11-27T16:23:34,737 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/c6a8bbe7286e4a2b8cd07051015dfacb, entries=150, sequenceid=129, filesize=11.8 K 2024-11-27T16:23:34,738 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 771007c821cc85e485653aceb22dba4b in 705ms, sequenceid=129, compaction requested=true 2024-11-27T16:23:34,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:34,738 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:34,738 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:34,738 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
771007c821cc85e485653aceb22dba4b:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:34,738 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:34,738 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:34,738 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:34,738 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:23:34,739 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:34,739 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93087 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:34,739 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/A is initiating minor compaction (all files) 2024-11-27T16:23:34,739 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/A in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:34,740 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/98f17541f0de471fb05d373c3358e32e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/ef4d3aaa2d444600ab42e28791e7e8f0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/df89bfcd088e4a5c9dea8da7df5be53a] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=90.9 K 2024-11-27T16:23:34,740 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:34,740 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/98f17541f0de471fb05d373c3358e32e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/ef4d3aaa2d444600ab42e28791e7e8f0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/df89bfcd088e4a5c9dea8da7df5be53a] 2024-11-27T16:23:34,740 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36225 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:34,740 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/B is initiating minor compaction (all files) 2024-11-27T16:23:34,740 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/B in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:34,740 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/bc48ebd121ab4a4a9a580ad5583c92a5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/dc10e3811ab6475488f57fcf8d362cc1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/7ade542436be4518bcbccaff01b2cb2c] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=35.4 K 2024-11-27T16:23:34,740 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98f17541f0de471fb05d373c3358e32e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732724610601 2024-11-27T16:23:34,740 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting bc48ebd121ab4a4a9a580ad5583c92a5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732724610601 2024-11-27T16:23:34,741 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef4d3aaa2d444600ab42e28791e7e8f0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732724611248 2024-11-27T16:23:34,741 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting dc10e3811ab6475488f57fcf8d362cc1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732724611248 2024-11-27T16:23:34,741 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting df89bfcd088e4a5c9dea8da7df5be53a, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732724613409 2024-11-27T16:23:34,741 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 
7ade542436be4518bcbccaff01b2cb2c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732724613409 2024-11-27T16:23:34,754 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:34,760 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#B#compaction#591 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:34,760 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/5985980d92c64f4fa8e06af189374428 is 50, key is test_row_0/B:col10/1732724613411/Put/seqid=0 2024-11-27T16:23:34,772 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127e06addc0fc6c4d6db891d4355aeeb5bc_771007c821cc85e485653aceb22dba4b store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:34,774 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127e06addc0fc6c4d6db891d4355aeeb5bc_771007c821cc85e485653aceb22dba4b, store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:34,774 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e06addc0fc6c4d6db891d4355aeeb5bc_771007c821cc85e485653aceb22dba4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:34,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742512_1688 (size=4469) 2024-11-27T16:23:34,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742511_1687 (size=12325) 2024-11-27T16:23:34,807 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#A#compaction#590 average throughput is 0.46 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:34,808 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/5223feaf019f4799897f1ee64bd71cd6 is 175, key is test_row_0/A:col10/1732724613411/Put/seqid=0 2024-11-27T16:23:34,813 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/5985980d92c64f4fa8e06af189374428 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5985980d92c64f4fa8e06af189374428 2024-11-27T16:23:34,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742513_1689 (size=31279) 2024-11-27T16:23:34,827 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 771007c821cc85e485653aceb22dba4b/B of 771007c821cc85e485653aceb22dba4b into 5985980d92c64f4fa8e06af189374428(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:34,827 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:34,827 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/B, priority=13, startTime=1732724614738; duration=0sec 2024-11-27T16:23:34,827 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:34,827 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:B 2024-11-27T16:23:34,828 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:34,828 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36225 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:34,828 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/C is initiating minor compaction (all files) 2024-11-27T16:23:34,828 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/C in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
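The compaction entries above show ExploringCompactionPolicy selecting all three eligible store files per column family as soon as the flush completes, with PressureAwareThroughputController capping the work at 50.00 MB/second. For readers who want to trigger a request like this outside the test harness, the following is a minimal, hypothetical sketch against the standard HBase 2.x Admin API; the class name and the polling loop are illustrative assumptions and are not part of the test output.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
        TableName table = TableName.valueOf("TestAcidGuarantees"); // table name taken from the log above
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.compact(table);                                    // queue a minor compaction for every region of the table
          // Poll until the region servers report no compaction in progress for the table.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(1000L);
          }
        }
      }
    }

Requesting the compaction through Admin only queues the work; which files are actually selected is still decided server-side by the configured compaction policy, exactly as in the SortedCompactionPolicy/ExploringCompactionPolicy entries above.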
2024-11-27T16:23:34,829 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/b17a7914226140b8b0be8c050544126c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/f8c178e51e39487f8de9e67a25267aae, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/c6a8bbe7286e4a2b8cd07051015dfacb] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=35.4 K 2024-11-27T16:23:34,829 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b17a7914226140b8b0be8c050544126c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732724610601 2024-11-27T16:23:34,830 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting f8c178e51e39487f8de9e67a25267aae, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732724611248 2024-11-27T16:23:34,830 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting c6a8bbe7286e4a2b8cd07051015dfacb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732724613409 2024-11-27T16:23:34,849 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#C#compaction#592 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:34,849 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/3930a0ab195c4af0ba9758e11db068a8 is 50, key is test_row_0/C:col10/1732724613411/Put/seqid=0 2024-11-27T16:23:34,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742514_1690 (size=12325) 2024-11-27T16:23:35,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:35,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:23:35,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:35,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:35,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:35,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:35,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:35,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:35,231 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/5223feaf019f4799897f1ee64bd71cd6 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/5223feaf019f4799897f1ee64bd71cd6 2024-11-27T16:23:35,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127d4f545e329b947c9b50244ac2169408f_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724614084/Put/seqid=0 2024-11-27T16:23:35,236 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 771007c821cc85e485653aceb22dba4b/A of 771007c821cc85e485653aceb22dba4b into 5223feaf019f4799897f1ee64bd71cd6(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
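The RegionTooBusyException warnings that follow, together with the client-side RpcRetryingCallerImpl entries (tries=7, retries=16), are the normal retry path for a blocked memstore: the server rejects the Mutate call with "Over memstore limit=512.0 K" and the client backs off and retries until its limits are exhausted. As a hedged illustration only (the class name and the configuration values are examples, not the test's actual settings), an ordinary client put exercises the same path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Client-side retry knobs; RegionTooBusyException is retried until these are exhausted.
        conf.setInt("hbase.client.retries.number", 16);          // matches retries=16 seen in the log
        conf.setLong("hbase.client.pause", 100L);                // base backoff in ms (example value)
        conf.setLong("hbase.client.operation.timeout", 120000L); // overall per-operation cap in ms (example value)
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));        // row key taken from the log above
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Table.put() goes through RpcRetryingCallerImpl, which backs off and retries
          // when the server answers with RegionTooBusyException (memstore over its blocking limit).
          table.put(put);
        }
      }
    }

In a stock deployment the blocking threshold reported here is presumably derived from hbase.hregion.memstore.flush.size scaled by hbase.hregion.memstore.block.multiplier, which is why the test's very small 512.0 K limit trips it so readily while the flusher and compactions catch up.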
2024-11-27T16:23:35,237 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:35,237 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/A, priority=13, startTime=1732724614738; duration=0sec 2024-11-27T16:23:35,237 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:35,237 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:A 2024-11-27T16:23:35,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742515_1691 (size=14794) 2024-11-27T16:23:35,244 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:35,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:35,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724675244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:35,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:35,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724675249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:35,253 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127d4f545e329b947c9b50244ac2169408f_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127d4f545e329b947c9b50244ac2169408f_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:35,255 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/e363b486582e489f8a5d709690f703a8, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:35,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/e363b486582e489f8a5d709690f703a8 is 175, key is test_row_0/A:col10/1732724614084/Put/seqid=0 2024-11-27T16:23:35,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742516_1692 (size=39749) 2024-11-27T16:23:35,310 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/3930a0ab195c4af0ba9758e11db068a8 as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/3930a0ab195c4af0ba9758e11db068a8 2024-11-27T16:23:35,325 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 771007c821cc85e485653aceb22dba4b/C of 771007c821cc85e485653aceb22dba4b into 3930a0ab195c4af0ba9758e11db068a8(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:35,325 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:35,325 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/C, priority=13, startTime=1732724614738; duration=0sec 2024-11-27T16:23:35,325 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:35,325 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:C 2024-11-27T16:23:35,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:35,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724675350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:35,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:35,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724675353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:35,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-27T16:23:35,512 INFO [Thread-2863 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-27T16:23:35,513 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:35,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-27T16:23:35,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-27T16:23:35,516 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:35,517 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:35,517 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:35,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:35,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724675552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:35,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:35,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724675557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:35,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:35,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49348 deadline: 1732724675596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:35,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:35,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49412 deadline: 1732724675596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:35,599 DEBUG [Thread-2857 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:23:35,599 DEBUG [Thread-2853 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:23:35,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:35,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1732724675600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:35,608 DEBUG [Thread-2861 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:23:35,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-27T16:23:35,669 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:35,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-27T16:23:35,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 
{event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:35,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:35,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:35,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:35,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
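
The pid=164 flush keeps failing with "Unable to complete flush ... as already flushing" because the MemStoreFlusher is still draining the same region, so the master simply re-dispatches the procedure (it reappears further down). For reference, a minimal sketch of driving a table flush from a client and retrying on such transient failures is shown below; the table name comes from this log, but the retry cap and pause are illustrative and not taken from the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import java.io.IOException;

public class FlushWithRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      int attempts = 0;
      while (true) {
        try {
          // Admin.flush drives a flush procedure much like pid=164 above.
          admin.flush(table);
          break;
        } catch (IOException e) {
          if (++attempts >= 5) {
            throw e;                        // give up after a few tries (arbitrary cap)
          }
          Thread.sleep(1000L * attempts);   // simple linear backoff, illustrative only
        }
      }
    }
  }
}
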
2024-11-27T16:23:35,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
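
The RegionTooBusyException rejections above come from HRegion.checkResources(): a region blocks new writes once its memstore exceeds the configured flush size multiplied by the block multiplier, which is the "Over memstore limit=512.0 K" figure in these messages. The log does not show the exact settings this run used; one combination that yields a 512 K blocking limit is sketched below (the property names are standard HBase keys, the values are an assumption).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  // Produce a configuration whose per-region blocking limit works out to 512 KB.
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (assumed value) ...
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // ... and block writes with RegionTooBusyException at flushSize * multiplier,
    // i.e. 512 KB, matching the "Over memstore limit=512.0 K" messages above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
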
2024-11-27T16:23:35,692 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=155, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/e363b486582e489f8a5d709690f703a8 2024-11-27T16:23:35,710 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/9f6f084fdaa34cedbfc3c431f490294a is 50, key is test_row_0/B:col10/1732724614084/Put/seqid=0 2024-11-27T16:23:35,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742517_1693 (size=12151) 2024-11-27T16:23:35,746 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/9f6f084fdaa34cedbfc3c431f490294a 2024-11-27T16:23:35,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/871f103f757a40458fcb6ee1c6798295 is 50, key is test_row_0/C:col10/1732724614084/Put/seqid=0 2024-11-27T16:23:35,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742518_1694 (size=12151) 2024-11-27T16:23:35,807 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/871f103f757a40458fcb6ee1c6798295 2024-11-27T16:23:35,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-27T16:23:35,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/e363b486582e489f8a5d709690f703a8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/e363b486582e489f8a5d709690f703a8 2024-11-27T16:23:35,822 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:35,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-27T16:23:35,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
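
The HRegionFileSystem(442) "Committing .tmp/A/... as A/..." lines are the commit step of the flush: each store writes its new HFile under the region's .tmp directory and then renames it into the column-family directory. A simplified illustration of that rename using the plain Hadoop FileSystem API follows; it mirrors the idea only and is not the HBase-internal implementation (the helper name and path layout are made up for the example).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;

public class CommitFlushedFile {
  // Move a flushed HFile from the region's .tmp area into the family directory,
  // mirroring the "Committing .tmp/A/<file> as A/<file>" log lines above.
  static void commit(FileSystem fs, Path regionDir, String family, String fileName)
      throws IOException {
    Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), fileName);
    Path familyDir = new Path(regionDir, family);
    if (!fs.exists(familyDir)) {
      fs.mkdirs(familyDir);
    }
    if (!fs.rename(tmpFile, new Path(familyDir, fileName))) {
      throw new IOException("Failed to commit " + tmpFile + " into " + familyDir);
    }
  }
}
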
2024-11-27T16:23:35,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:35,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:35,823 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:35,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:35,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:35,827 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/e363b486582e489f8a5d709690f703a8, entries=200, sequenceid=155, filesize=38.8 K 2024-11-27T16:23:35,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/9f6f084fdaa34cedbfc3c431f490294a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/9f6f084fdaa34cedbfc3c431f490294a 2024-11-27T16:23:35,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/9f6f084fdaa34cedbfc3c431f490294a, entries=150, sequenceid=155, filesize=11.9 K 2024-11-27T16:23:35,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/871f103f757a40458fcb6ee1c6798295 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/871f103f757a40458fcb6ee1c6798295 2024-11-27T16:23:35,837 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/871f103f757a40458fcb6ee1c6798295, entries=150, sequenceid=155, filesize=11.9 K 2024-11-27T16:23:35,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 771007c821cc85e485653aceb22dba4b in 621ms, sequenceid=155, compaction requested=false 2024-11-27T16:23:35,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:35,862 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:23:35,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:35,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:35,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:35,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:35,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:35,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:35,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 
771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:35,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c4cbe4e818f94da09881928988aef8ef_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724615226/Put/seqid=0 2024-11-27T16:23:35,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742519_1695 (size=14794) 2024-11-27T16:23:35,899 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:35,903 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c4cbe4e818f94da09881928988aef8ef_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c4cbe4e818f94da09881928988aef8ef_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:35,905 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/96ecebd0336f4b52b413b679253e7892, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:35,906 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/96ecebd0336f4b52b413b679253e7892 is 175, key is test_row_0/A:col10/1732724615226/Put/seqid=0 2024-11-27T16:23:35,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742520_1696 (size=39749) 2024-11-27T16:23:35,926 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=169, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/96ecebd0336f4b52b413b679253e7892 2024-11-27T16:23:35,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/a578bffa410647e4bc7fbe4190a56f41 is 50, key is test_row_0/B:col10/1732724615226/Put/seqid=0 2024-11-27T16:23:35,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:35,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724675951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:35,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:35,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724675954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:35,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742521_1697 (size=12151) 2024-11-27T16:23:35,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/a578bffa410647e4bc7fbe4190a56f41 2024-11-27T16:23:35,975 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:35,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-27T16:23:35,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:35,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:35,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:35,976 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:35,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:35,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:35,981 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/724cef2dfb224cea98ff09ca46a26e49 is 50, key is test_row_0/C:col10/1732724615226/Put/seqid=0 2024-11-27T16:23:36,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742522_1698 (size=12151) 2024-11-27T16:23:36,016 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/724cef2dfb224cea98ff09ca46a26e49 2024-11-27T16:23:36,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/96ecebd0336f4b52b413b679253e7892 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/96ecebd0336f4b52b413b679253e7892 2024-11-27T16:23:36,029 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/96ecebd0336f4b52b413b679253e7892, entries=200, sequenceid=169, filesize=38.8 K 2024-11-27T16:23:36,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/a578bffa410647e4bc7fbe4190a56f41 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a578bffa410647e4bc7fbe4190a56f41 2024-11-27T16:23:36,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a578bffa410647e4bc7fbe4190a56f41, entries=150, sequenceid=169, filesize=11.9 K 2024-11-27T16:23:36,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/724cef2dfb224cea98ff09ca46a26e49 as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/724cef2dfb224cea98ff09ca46a26e49 2024-11-27T16:23:36,045 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/724cef2dfb224cea98ff09ca46a26e49, entries=150, sequenceid=169, filesize=11.9 K 2024-11-27T16:23:36,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 771007c821cc85e485653aceb22dba4b in 184ms, sequenceid=169, compaction requested=true 2024-11-27T16:23:36,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:36,046 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:36,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:36,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:36,047 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:36,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:36,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:36,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:36,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:36,048 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36627 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:36,048 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/B is initiating minor compaction (all files) 2024-11-27T16:23:36,048 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/B in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
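
The ExploringCompactionPolicy(116) lines above report that all 3 eligible files per store were selected ("after considering 1 permutations with 1 in ratio"). The central acceptance test in that policy is a size-ratio check: every file in a candidate set must be no larger than the combined size of the other files times the compaction ratio. A simplified rendering of that check is below, fed with the three B-family file sizes from this selection (total 36627 bytes) and the default ratio of 1.2; the real policy additionally enforces min/max file counts and an overall size cap.

import java.util.List;

public class RatioCheck {
  // Core condition of the exploring selector: each file must be no larger than
  // the sum of the other candidates times the compaction ratio.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = 0;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // The three B-family files selected above; their total is 36627 bytes.
    System.out.println(filesInRatio(List.of(12_325L, 12_151L, 12_151L), 1.2)); // true
  }
}
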
2024-11-27T16:23:36,048 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5985980d92c64f4fa8e06af189374428, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/9f6f084fdaa34cedbfc3c431f490294a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a578bffa410647e4bc7fbe4190a56f41] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=35.8 K 2024-11-27T16:23:36,048 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110777 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:36,048 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/A is initiating minor compaction (all files) 2024-11-27T16:23:36,048 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/A in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:36,048 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/5223feaf019f4799897f1ee64bd71cd6, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/e363b486582e489f8a5d709690f703a8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/96ecebd0336f4b52b413b679253e7892] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=108.2 K 2024-11-27T16:23:36,048 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:36,048 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/5223feaf019f4799897f1ee64bd71cd6, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/e363b486582e489f8a5d709690f703a8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/96ecebd0336f4b52b413b679253e7892] 2024-11-27T16:23:36,049 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 5985980d92c64f4fa8e06af189374428, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732724613409 2024-11-27T16:23:36,049 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5223feaf019f4799897f1ee64bd71cd6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732724613409 2024-11-27T16:23:36,050 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f6f084fdaa34cedbfc3c431f490294a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732724614084 2024-11-27T16:23:36,050 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting e363b486582e489f8a5d709690f703a8, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732724614084 2024-11-27T16:23:36,050 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96ecebd0336f4b52b413b679253e7892, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732724615226 2024-11-27T16:23:36,050 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a578bffa410647e4bc7fbe4190a56f41, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732724615226 2024-11-27T16:23:36,058 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:23:36,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:36,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:36,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:36,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:36,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:36,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:36,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:36,069 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized 
enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:36,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:36,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724676082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:36,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724676084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,094 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#B#compaction#600 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:36,094 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/26c678bfa0ca4ba28da987d1fdefdc3e is 50, key is test_row_0/B:col10/1732724615226/Put/seqid=0 2024-11-27T16:23:36,104 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127114009b77aa3483cb9d7fc8e305536bc_771007c821cc85e485653aceb22dba4b store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:36,106 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127114009b77aa3483cb9d7fc8e305536bc_771007c821cc85e485653aceb22dba4b, store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:36,106 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127114009b77aa3483cb9d7fc8e305536bc_771007c821cc85e485653aceb22dba4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:36,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-27T16:23:36,122 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112716bad35b199540f8b04fc3a86fa4a418_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724615951/Put/seqid=0 2024-11-27T16:23:36,130 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,130 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-27T16:23:36,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:36,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:36,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:36,130 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:36,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
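The pid=164 failures above are the master's flush procedure being turned away because the region is already flushing; the region server answers "NOT flushing ... as already flushing" and the procedure simply resubmits, which is why the same stack trace repeats for pid=164 throughout this section. As a rough, hypothetical illustration (not the test's actual code), a table flush like the one behind pid=163/164 can be requested through the Admin API:

// Hedged sketch only: Admin.flush submits a flush procedure on the master for the
// table named in this log; while MemStoreFlusher.0 still owns the flush, the region
// server rejects the remote call and the procedure retries.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Requests a flush of all regions of the table; the procedure state is what
      // MasterRpcServices(1305) "Checking to see if procedure is done" is polling.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}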
2024-11-27T16:23:36,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:36,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742523_1699 (size=12527) 2024-11-27T16:23:36,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742524_1700 (size=4469) 2024-11-27T16:23:36,178 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#A#compaction#599 average throughput is 0.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:36,178 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/ec1429db38d946f68b9480a5c60509c8 is 175, key is test_row_0/A:col10/1732724615226/Put/seqid=0 2024-11-27T16:23:36,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:36,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724676192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:36,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724676189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742525_1701 (size=14794) 2024-11-27T16:23:36,202 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:36,205 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112716bad35b199540f8b04fc3a86fa4a418_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112716bad35b199540f8b04fc3a86fa4a418_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:36,206 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/93c25eecab824b60ac3606eb0fa49336, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:36,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/93c25eecab824b60ac3606eb0fa49336 is 175, key is test_row_0/A:col10/1732724615951/Put/seqid=0 2024-11-27T16:23:36,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742526_1702 (size=31481) 2024-11-27T16:23:36,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742527_1703 (size=39749) 2024-11-27T16:23:36,254 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=194, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/93c25eecab824b60ac3606eb0fa49336 2024-11-27T16:23:36,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/a17ad94addac4d4192eb6c5373eedf05 is 50, key is test_row_0/B:col10/1732724615951/Put/seqid=0 2024-11-27T16:23:36,282 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-27T16:23:36,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:36,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:36,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:36,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:36,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:36,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
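The RegionTooBusyException WARNs in this section are HRegion.checkResources blocking new Mutate calls once the region's memstore passes its blocking limit (512.0 K here). A minimal, hypothetical client-side sketch of the write path being rejected; the row/family/qualifier names are taken from the logged keys (test_row_0, A, col10), and the manual backoff is purely illustrative, since the real client also retries on its own according to hbase.client.retries.number and hbase.client.pause:

// Hedged sketch, not from the test itself.
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);
      } catch (IOException e) {
        // When the region is blocking updates (the "Over memstore limit" WARNs here),
        // the server's RegionTooBusyException eventually reaches the caller once the
        // client's own retries are exhausted; a simple manual backoff and retry:
        Thread.sleep(200L);
        table.put(put);
      }
    }
  }
}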
2024-11-27T16:23:36,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742528_1704 (size=12151) 2024-11-27T16:23:36,298 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/a17ad94addac4d4192eb6c5373eedf05 2024-11-27T16:23:36,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/b1009ad8097649b88ce9ee2dbd37a1fa is 50, key is test_row_0/C:col10/1732724615951/Put/seqid=0 2024-11-27T16:23:36,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742529_1705 (size=12151) 2024-11-27T16:23:36,356 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/b1009ad8097649b88ce9ee2dbd37a1fa 2024-11-27T16:23:36,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/93c25eecab824b60ac3606eb0fa49336 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/93c25eecab824b60ac3606eb0fa49336 2024-11-27T16:23:36,376 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/93c25eecab824b60ac3606eb0fa49336, entries=200, sequenceid=194, filesize=38.8 K 2024-11-27T16:23:36,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/a17ad94addac4d4192eb6c5373eedf05 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a17ad94addac4d4192eb6c5373eedf05 2024-11-27T16:23:36,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a17ad94addac4d4192eb6c5373eedf05, entries=150, sequenceid=194, filesize=11.9 K 2024-11-27T16:23:36,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/b1009ad8097649b88ce9ee2dbd37a1fa as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/b1009ad8097649b88ce9ee2dbd37a1fa 2024-11-27T16:23:36,390 INFO [MemStoreFlusher.0 
{}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/b1009ad8097649b88ce9ee2dbd37a1fa, entries=150, sequenceid=194, filesize=11.9 K 2024-11-27T16:23:36,392 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 771007c821cc85e485653aceb22dba4b in 334ms, sequenceid=194, compaction requested=true 2024-11-27T16:23:36,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:36,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:A, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:36,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:23:36,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:B, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:36,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-27T16:23:36,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:36,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-27T16:23:36,399 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T16:23:36,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:36,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:36,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:36,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:36,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:36,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:36,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:36,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127b221a84fa35241e4a8509812133c396e_771007c821cc85e485653aceb22dba4b is 50, key is 
test_row_0/A:col10/1732724616398/Put/seqid=0 2024-11-27T16:23:36,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742530_1706 (size=14794) 2024-11-27T16:23:36,432 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:36,435 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127b221a84fa35241e4a8509812133c396e_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b221a84fa35241e4a8509812133c396e_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:36,437 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-27T16:23:36,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:36,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:36,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:36,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
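The mobdir/ paths and the DefaultMobStoreFlusher/DefaultMobStoreCompactor lines show that family A is MOB-enabled: flushed values above the MOB threshold are written under mobdir/.tmp and renamed into mobdir/data as logged above. A sketch of how such a family can be declared; the threshold value is an illustrative assumption, as the actual test configuration is not visible in this log:

// Hedged sketch of a MOB-enabled column family descriptor.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static TableDescriptor descriptor() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)     // values above the threshold go to mobdir/
            .setMobThreshold(100L)   // bytes; illustrative value only
            .build())
        .build();
  }
}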
2024-11-27T16:23:36,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:36,446 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/8dc9dbb471914717a1ebfdd9f5cd5e28, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:36,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:36,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/8dc9dbb471914717a1ebfdd9f5cd5e28 is 175, key is test_row_0/A:col10/1732724616398/Put/seqid=0 2024-11-27T16:23:36,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742531_1707 (size=39749) 2024-11-27T16:23:36,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:36,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724676493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:36,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724676497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,575 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/26c678bfa0ca4ba28da987d1fdefdc3e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/26c678bfa0ca4ba28da987d1fdefdc3e 2024-11-27T16:23:36,580 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 771007c821cc85e485653aceb22dba4b/B of 771007c821cc85e485653aceb22dba4b into 26c678bfa0ca4ba28da987d1fdefdc3e(size=12.2 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
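The PressureAwareThroughputController lines ("average throughput is ... total limit is 50.00 MB/second") come from compaction throughput limiting. A hedged configuration sketch using the standard bounds for that controller; the 50 MB/s upper value mirrors the limit reported in the log, while the lower bound is an assumed example rather than a value read from this run:

// Hedged sketch of compaction throughput bounds.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static Configuration conf() {
    Configuration conf = HBaseConfiguration.create();
    // The controller moves between these bounds depending on memstore pressure.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 25L * 1024 * 1024);
    return conf;
  }
}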
2024-11-27T16:23:36,580 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:36,580 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/B, priority=13, startTime=1732724616047; duration=0sec 2024-11-27T16:23:36,581 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-27T16:23:36,581 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:B 2024-11-27T16:23:36,581 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking 2024-11-27T16:23:36,581 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-27T16:23:36,581 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-27T16:23:36,581 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. because compaction request was cancelled 2024-11-27T16:23:36,581 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:A 2024-11-27T16:23:36,581 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:36,584 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48778 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:36,584 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/C is initiating minor compaction (all files) 2024-11-27T16:23:36,584 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/C in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:36,585 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/3930a0ab195c4af0ba9758e11db068a8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/871f103f757a40458fcb6ee1c6798295, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/724cef2dfb224cea98ff09ca46a26e49, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/b1009ad8097649b88ce9ee2dbd37a1fa] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=47.6 K 2024-11-27T16:23:36,585 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3930a0ab195c4af0ba9758e11db068a8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732724613409 2024-11-27T16:23:36,586 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 871f103f757a40458fcb6ee1c6798295, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732724614084 2024-11-27T16:23:36,586 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 724cef2dfb224cea98ff09ca46a26e49, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732724615226 2024-11-27T16:23:36,586 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting b1009ad8097649b88ce9ee2dbd37a1fa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732724615949 2024-11-27T16:23:36,597 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#C#compaction#605 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:36,598 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/742d8ff7a1ca4a70bbd8f65f3a3d90ad is 50, key is test_row_0/C:col10/1732724615951/Put/seqid=0 2024-11-27T16:23:36,598 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-27T16:23:36,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:36,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:36,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:36,600 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:36,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:36,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:36,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:36,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724676600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:36,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724676602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-27T16:23:36,638 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/ec1429db38d946f68b9480a5c60509c8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/ec1429db38d946f68b9480a5c60509c8 2024-11-27T16:23:36,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742532_1708 (size=12561) 2024-11-27T16:23:36,643 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 771007c821cc85e485653aceb22dba4b/A of 771007c821cc85e485653aceb22dba4b into ec1429db38d946f68b9480a5c60509c8(size=30.7 K), total size for store is 69.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:23:36,643 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:36,643 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/A, priority=13, startTime=1732724616046; duration=0sec 2024-11-27T16:23:36,643 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:23:36,643 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:A 2024-11-27T16:23:36,643 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:B 2024-11-27T16:23:36,643 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-11-27T16:23:36,644 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/742d8ff7a1ca4a70bbd8f65f3a3d90ad as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/742d8ff7a1ca4a70bbd8f65f3a3d90ad 2024-11-27T16:23:36,644 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-27T16:23:36,644 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-27T16:23:36,644 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. because compaction request was cancelled 2024-11-27T16:23:36,644 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:C 2024-11-27T16:23:36,644 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-27T16:23:36,645 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-27T16:23:36,645 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 
2024-11-27T16:23:36,645 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. because compaction request was cancelled 2024-11-27T16:23:36,645 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:B 2024-11-27T16:23:36,648 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 771007c821cc85e485653aceb22dba4b/C of 771007c821cc85e485653aceb22dba4b into 742d8ff7a1ca4a70bbd8f65f3a3d90ad(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:36,648 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:36,649 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/C, priority=12, startTime=1732724616392; duration=0sec 2024-11-27T16:23:36,649 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:36,649 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:C 2024-11-27T16:23:36,753 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-27T16:23:36,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:36,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:36,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:36,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:36,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:36,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:36,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:36,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724676805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,815 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:36,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724676811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,856 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=206, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/8dc9dbb471914717a1ebfdd9f5cd5e28 2024-11-27T16:23:36,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/5ba25a2ebf7148458f5b05109491c405 is 50, key is test_row_0/B:col10/1732724616398/Put/seqid=0 2024-11-27T16:23:36,906 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:36,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-27T16:23:36,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:36,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:36,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:36,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:36,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:36,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:36,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742533_1709 (size=12151) 2024-11-27T16:23:36,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/5ba25a2ebf7148458f5b05109491c405 2024-11-27T16:23:36,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/50dc7945996c495d97a5975aaa63ebee is 50, key is test_row_0/C:col10/1732724616398/Put/seqid=0 2024-11-27T16:23:36,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742534_1710 (size=12151) 2024-11-27T16:23:37,058 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:37,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-27T16:23:37,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:37,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
as already flushing 2024-11-27T16:23:37,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:37,059 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:37,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:37,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:37,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:37,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724677110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:37,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:37,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724677117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:37,213 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:37,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-27T16:23:37,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:37,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:37,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:37,214 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:37,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:37,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:37,367 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:37,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-27T16:23:37,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:37,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
as already flushing 2024-11-27T16:23:37,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:37,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:37,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:37,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:37,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/50dc7945996c495d97a5975aaa63ebee 2024-11-27T16:23:37,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/8dc9dbb471914717a1ebfdd9f5cd5e28 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/8dc9dbb471914717a1ebfdd9f5cd5e28 2024-11-27T16:23:37,404 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/8dc9dbb471914717a1ebfdd9f5cd5e28, entries=200, sequenceid=206, filesize=38.8 K 2024-11-27T16:23:37,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/5ba25a2ebf7148458f5b05109491c405 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5ba25a2ebf7148458f5b05109491c405 2024-11-27T16:23:37,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5ba25a2ebf7148458f5b05109491c405, entries=150, sequenceid=206, filesize=11.9 K 2024-11-27T16:23:37,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/50dc7945996c495d97a5975aaa63ebee as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/50dc7945996c495d97a5975aaa63ebee 2024-11-27T16:23:37,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/50dc7945996c495d97a5975aaa63ebee, entries=150, sequenceid=206, filesize=11.9 K 2024-11-27T16:23:37,414 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 771007c821cc85e485653aceb22dba4b in 1015ms, sequenceid=206, compaction requested=true 2024-11-27T16:23:37,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:37,415 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:37,415 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110979 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:37,415 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/A is initiating minor compaction (all files) 2024-11-27T16:23:37,415 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/A in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:37,416 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/ec1429db38d946f68b9480a5c60509c8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/93c25eecab824b60ac3606eb0fa49336, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/8dc9dbb471914717a1ebfdd9f5cd5e28] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=108.4 K 2024-11-27T16:23:37,416 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:37,416 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/ec1429db38d946f68b9480a5c60509c8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/93c25eecab824b60ac3606eb0fa49336, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/8dc9dbb471914717a1ebfdd9f5cd5e28] 2024-11-27T16:23:37,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:37,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:37,416 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:37,416 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec1429db38d946f68b9480a5c60509c8, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732724615226 2024-11-27T16:23:37,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:37,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:37,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:C, priority=-2147483648, current under compaction 
store size is 3 2024-11-27T16:23:37,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:37,416 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93c25eecab824b60ac3606eb0fa49336, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732724615942 2024-11-27T16:23:37,417 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8dc9dbb471914717a1ebfdd9f5cd5e28, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732724616075 2024-11-27T16:23:37,418 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:37,418 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/B is initiating minor compaction (all files) 2024-11-27T16:23:37,418 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/B in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:37,418 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/26c678bfa0ca4ba28da987d1fdefdc3e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a17ad94addac4d4192eb6c5373eedf05, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5ba25a2ebf7148458f5b05109491c405] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=36.0 K 2024-11-27T16:23:37,419 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 26c678bfa0ca4ba28da987d1fdefdc3e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732724615226 2024-11-27T16:23:37,419 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a17ad94addac4d4192eb6c5373eedf05, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732724615949 2024-11-27T16:23:37,419 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ba25a2ebf7148458f5b05109491c405, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732724616075 2024-11-27T16:23:37,436 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:37,456 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
771007c821cc85e485653aceb22dba4b#B#compaction#609 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:37,457 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/39a23149aec6486785fd8db94cc5275a is 50, key is test_row_0/B:col10/1732724616398/Put/seqid=0 2024-11-27T16:23:37,472 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127ada452cc927a430194a56b9b1c1c3f0e_771007c821cc85e485653aceb22dba4b store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:37,474 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127ada452cc927a430194a56b9b1c1c3f0e_771007c821cc85e485653aceb22dba4b, store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:37,474 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127ada452cc927a430194a56b9b1c1c3f0e_771007c821cc85e485653aceb22dba4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:37,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742535_1711 (size=12629) 2024-11-27T16:23:37,518 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/39a23149aec6486785fd8db94cc5275a as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/39a23149aec6486785fd8db94cc5275a 2024-11-27T16:23:37,521 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:37,522 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-27T16:23:37,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:37,522 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 771007c821cc85e485653aceb22dba4b/B of 771007c821cc85e485653aceb22dba4b into 39a23149aec6486785fd8db94cc5275a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
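The "considering 1 permutations with 1 in ratio" entries above come from the exploring compaction policy's size-ratio test: a candidate set is only kept when no single file is larger than the configured ratio (hbase.hstore.compaction.ratio, 1.2 by default unless the test overrides it) times the combined size of the other files in the set. The following is a rough standalone Java sketch of that check for illustration only; it mirrors the idea behind ExploringCompactionPolicy rather than the actual HBase class, and the file sizes in the example are approximations of the B-family figures logged above.

    import java.util.List;

    // Standalone sketch of the "in ratio" test used when picking minor-compaction
    // candidates: a set passes when no file exceeds ratio * (sum of the other files).
    public class RatioCheckSketch {
      static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
          return true;
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false;   // one file dominates the set, so it is not "in ratio"
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Approximate byte sizes of the three B store files selected above (~12.2 K, 11.9 K, 11.9 K).
        System.out.println(filesInRatio(List.of(12_493L, 12_168L, 12_168L), 1.2)); // prints true
      }
    }

With three files of similar size the set passes easily, which is why a single permutation was enough for the selection logged above.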
2024-11-27T16:23:37,522 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:37,522 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/B, priority=13, startTime=1732724617416; duration=0sec 2024-11-27T16:23:37,522 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T16:23:37,522 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:37,522 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:B 2024-11-27T16:23:37,522 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-27T16:23:37,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:37,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:37,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:37,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:37,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:37,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:37,523 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-27T16:23:37,523 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-27T16:23:37,523 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
because compaction request was cancelled 2024-11-27T16:23:37,523 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:C 2024-11-27T16:23:37,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742536_1712 (size=4469) 2024-11-27T16:23:37,543 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#A#compaction#608 average throughput is 0.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:37,544 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/2b036d1d0521450187f99bf2a2fdb1fe is 175, key is test_row_0/A:col10/1732724616398/Put/seqid=0 2024-11-27T16:23:37,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411279d5a5cbf3f724a23b45236dbe65ea441_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724616495/Put/seqid=0 2024-11-27T16:23:37,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742537_1713 (size=31583) 2024-11-27T16:23:37,593 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/2b036d1d0521450187f99bf2a2fdb1fe as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/2b036d1d0521450187f99bf2a2fdb1fe 2024-11-27T16:23:37,598 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 771007c821cc85e485653aceb22dba4b/A of 771007c821cc85e485653aceb22dba4b into 2b036d1d0521450187f99bf2a2fdb1fe(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
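The pid=164 FlushRegionCallable above is the per-region child of a master-driven table flush (its parent, pid=163, a FlushTableProcedure, completes further down in this log). As a point of reference, a minimal client-side sketch of requesting such a flush through the public Admin API is shown below; the test harness may well trigger it differently, so treat this purely as an illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Minimal sketch: ask the cluster to flush every region of TestAcidGuarantees.
    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }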
2024-11-27T16:23:37,598 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:37,598 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/A, priority=13, startTime=1732724617414; duration=0sec 2024-11-27T16:23:37,599 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:37,599 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:A 2024-11-27T16:23:37,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742538_1714 (size=12304) 2024-11-27T16:23:37,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:37,608 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411279d5a5cbf3f724a23b45236dbe65ea441_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279d5a5cbf3f724a23b45236dbe65ea441_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:37,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/053d7c73f999431fb8543d70083b0104, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:37,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/053d7c73f999431fb8543d70083b0104 is 175, key is test_row_0/A:col10/1732724616495/Put/seqid=0 2024-11-27T16:23:37,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
as already flushing 2024-11-27T16:23:37,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:37,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-27T16:23:37,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742539_1715 (size=31105) 2024-11-27T16:23:37,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:37,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724677636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:37,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:37,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724677636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:37,641 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/053d7c73f999431fb8543d70083b0104 2024-11-27T16:23:37,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/ad013440164b46c6a3148fa28e15e5b3 is 50, key is test_row_0/B:col10/1732724616495/Put/seqid=0 2024-11-27T16:23:37,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742540_1716 (size=12151) 2024-11-27T16:23:37,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:37,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:37,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724677745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:37,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724677744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:37,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:37,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724677949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:37,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:37,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724677949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:38,090 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/ad013440164b46c6a3148fa28e15e5b3 2024-11-27T16:23:38,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/74fdaa47f7f242afb68206037985e132 is 50, key is test_row_0/C:col10/1732724616495/Put/seqid=0 2024-11-27T16:23:38,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742541_1717 (size=12151) 2024-11-27T16:23:38,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:38,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724678255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:38,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:38,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724678268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:38,546 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/74fdaa47f7f242afb68206037985e132 2024-11-27T16:23:38,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/053d7c73f999431fb8543d70083b0104 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/053d7c73f999431fb8543d70083b0104 2024-11-27T16:23:38,556 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/053d7c73f999431fb8543d70083b0104, entries=150, sequenceid=234, filesize=30.4 K 2024-11-27T16:23:38,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/ad013440164b46c6a3148fa28e15e5b3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/ad013440164b46c6a3148fa28e15e5b3 2024-11-27T16:23:38,560 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/ad013440164b46c6a3148fa28e15e5b3, entries=150, sequenceid=234, filesize=11.9 K 2024-11-27T16:23:38,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/74fdaa47f7f242afb68206037985e132 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/74fdaa47f7f242afb68206037985e132 2024-11-27T16:23:38,566 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/74fdaa47f7f242afb68206037985e132, entries=150, sequenceid=234, filesize=11.9 K 2024-11-27T16:23:38,567 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 771007c821cc85e485653aceb22dba4b in 1045ms, sequenceid=234, compaction requested=true 2024-11-27T16:23:38,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:38,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:38,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-27T16:23:38,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-27T16:23:38,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-27T16:23:38,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0510 sec 2024-11-27T16:23:38,571 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 3.0570 sec 2024-11-27T16:23:38,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:38,767 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-27T16:23:38,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:38,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:38,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:38,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:38,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:38,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:38,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411274ba6adbfa6024ed284dd9108fdb7e251_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724618765/Put/seqid=0 2024-11-27T16:23:38,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742542_1718 (size=12304) 2024-11-27T16:23:38,803 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:38,809 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411274ba6adbfa6024ed284dd9108fdb7e251_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411274ba6adbfa6024ed284dd9108fdb7e251_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:38,810 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/728684a847e34415ae1e112b3a970fbf, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:38,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/728684a847e34415ae1e112b3a970fbf is 175, key is test_row_0/A:col10/1732724618765/Put/seqid=0 2024-11-27T16:23:38,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742543_1719 (size=31105) 2024-11-27T16:23:38,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:38,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724678868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:38,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:38,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724678869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:38,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:38,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724678974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724678974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:39,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:39,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724679182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:39,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:39,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724679183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:39,224 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=248, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/728684a847e34415ae1e112b3a970fbf 2024-11-27T16:23:39,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/d4db919c21954b5ea76fda64a178df8b is 50, key is test_row_0/B:col10/1732724618765/Put/seqid=0 2024-11-27T16:23:39,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742544_1720 (size=12151) 2024-11-27T16:23:39,271 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/d4db919c21954b5ea76fda64a178df8b 2024-11-27T16:23:39,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/59b174b28cd64bb59b40c26878ae0353 is 50, key is test_row_0/C:col10/1732724618765/Put/seqid=0 2024-11-27T16:23:39,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742545_1721 (size=12151) 2024-11-27T16:23:39,314 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/59b174b28cd64bb59b40c26878ae0353 2024-11-27T16:23:39,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/728684a847e34415ae1e112b3a970fbf as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/728684a847e34415ae1e112b3a970fbf 2024-11-27T16:23:39,322 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/728684a847e34415ae1e112b3a970fbf, entries=150, sequenceid=248, filesize=30.4 K 2024-11-27T16:23:39,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/d4db919c21954b5ea76fda64a178df8b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/d4db919c21954b5ea76fda64a178df8b 2024-11-27T16:23:39,328 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/d4db919c21954b5ea76fda64a178df8b, entries=150, sequenceid=248, filesize=11.9 K 2024-11-27T16:23:39,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/59b174b28cd64bb59b40c26878ae0353 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/59b174b28cd64bb59b40c26878ae0353 2024-11-27T16:23:39,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/59b174b28cd64bb59b40c26878ae0353, entries=150, sequenceid=248, filesize=11.9 K 2024-11-27T16:23:39,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 771007c821cc85e485653aceb22dba4b in 566ms, sequenceid=248, compaction requested=true 2024-11-27T16:23:39,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:39,333 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:39,334 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93793 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:39,334 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/A is initiating minor compaction (all files) 2024-11-27T16:23:39,334 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/A in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
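The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") come from the per-region memstore blocking check: once a region's pending memstore grows past the configured flush size multiplied by the block multiplier, new writes are rejected until a flush catches up. A rough standalone sketch of that arithmetic follows; it mirrors the idea behind HRegion's resource check rather than its actual code, and the 128 K flush size is only an assumption chosen to reproduce the 512 K limit seen in this test (the stock default flush size is far larger).

    // Sketch of the memstore blocking threshold behind the RegionTooBusyException entries above.
    // Concrete values are assumptions for illustration only.
    public class MemStoreBlockSketch {
      public static void main(String[] args) {
        long flushSize = 128 * 1024;     // hbase.hregion.memstore.flush.size (assumed test-sized value)
        long blockMultiplier = 4;        // hbase.hregion.memstore.block.multiplier (default 4)
        long blockingLimit = flushSize * blockMultiplier;  // 512 K, matching the logged limit

        long pendingMemStore = 600 * 1024;                 // hypothetical in-flight memstore size
        if (pendingMemStore > blockingLimit) {
          // The region server would reject the put with RegionTooBusyException;
          // clients are expected to back off and retry while flushes drain the memstore.
          System.out.println("reject put: " + pendingMemStore + " > " + blockingLimit);
        }
      }
    }

This is consistent with the same client connections above reappearing with new callIds and later deadlines until the flush at sequenceid=248 frees space.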
2024-11-27T16:23:39,334 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/2b036d1d0521450187f99bf2a2fdb1fe, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/053d7c73f999431fb8543d70083b0104, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/728684a847e34415ae1e112b3a970fbf] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=91.6 K 2024-11-27T16:23:39,334 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:39,334 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/2b036d1d0521450187f99bf2a2fdb1fe, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/053d7c73f999431fb8543d70083b0104, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/728684a847e34415ae1e112b3a970fbf] 2024-11-27T16:23:39,336 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b036d1d0521450187f99bf2a2fdb1fe, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732724616075 2024-11-27T16:23:39,336 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 053d7c73f999431fb8543d70083b0104, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732724616486 2024-11-27T16:23:39,337 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 728684a847e34415ae1e112b3a970fbf, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732724617624 2024-11-27T16:23:39,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:39,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:39,341 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:39,342 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:39,342 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/B is initiating minor compaction (all files) 2024-11-27T16:23:39,342 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/B in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:39,342 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/39a23149aec6486785fd8db94cc5275a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/ad013440164b46c6a3148fa28e15e5b3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/d4db919c21954b5ea76fda64a178df8b] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=36.1 K 2024-11-27T16:23:39,342 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:39,342 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 39a23149aec6486785fd8db94cc5275a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732724616075 2024-11-27T16:23:39,342 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting ad013440164b46c6a3148fa28e15e5b3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732724616486 2024-11-27T16:23:39,343 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting d4db919c21954b5ea76fda64a178df8b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732724617624 2024-11-27T16:23:39,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:39,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:39,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:39,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:39,365 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer 
created=d41d8cd98f00b204e9800998ecf8427e202411272f53bc4775364dadbc8cdad7db6562d1_771007c821cc85e485653aceb22dba4b store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:39,370 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411272f53bc4775364dadbc8cdad7db6562d1_771007c821cc85e485653aceb22dba4b, store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:39,370 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411272f53bc4775364dadbc8cdad7db6562d1_771007c821cc85e485653aceb22dba4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:39,378 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#B#compaction#617 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:39,378 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/5df8f7b1208d4435a6df8a9353d63168 is 50, key is test_row_0/B:col10/1732724618765/Put/seqid=0 2024-11-27T16:23:39,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742546_1722 (size=4469) 2024-11-27T16:23:39,426 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#A#compaction#616 average throughput is 0.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:39,427 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/086e5fbcee744012880e1c9cb5dcd58c is 175, key is test_row_0/A:col10/1732724618765/Put/seqid=0 2024-11-27T16:23:39,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742547_1723 (size=12731) 2024-11-27T16:23:39,471 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/5df8f7b1208d4435a6df8a9353d63168 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5df8f7b1208d4435a6df8a9353d63168 2024-11-27T16:23:39,475 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 771007c821cc85e485653aceb22dba4b/B of 771007c821cc85e485653aceb22dba4b into 5df8f7b1208d4435a6df8a9353d63168(size=12.4 K), total size for store is 12.4 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:39,475 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:39,475 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/B, priority=13, startTime=1732724619341; duration=0sec 2024-11-27T16:23:39,475 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:39,475 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:B 2024-11-27T16:23:39,475 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:39,476 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:39,477 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/C is initiating minor compaction (all files) 2024-11-27T16:23:39,477 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/C in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:39,477 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/742d8ff7a1ca4a70bbd8f65f3a3d90ad, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/50dc7945996c495d97a5975aaa63ebee, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/74fdaa47f7f242afb68206037985e132, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/59b174b28cd64bb59b40c26878ae0353] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=47.9 K 2024-11-27T16:23:39,477 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 742d8ff7a1ca4a70bbd8f65f3a3d90ad, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732724615949 2024-11-27T16:23:39,477 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 50dc7945996c495d97a5975aaa63ebee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732724616075 2024-11-27T16:23:39,478 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 74fdaa47f7f242afb68206037985e132, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732724616486 2024-11-27T16:23:39,478 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 59b174b28cd64bb59b40c26878ae0353, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732724617624 2024-11-27T16:23:39,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:39,493 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T16:23:39,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:39,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:39,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:39,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:39,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:39,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:39,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742548_1724 (size=31685) 2024-11-27T16:23:39,502 INFO 
[RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#C#compaction#618 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:39,503 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/086e5fbcee744012880e1c9cb5dcd58c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/086e5fbcee744012880e1c9cb5dcd58c 2024-11-27T16:23:39,503 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/3f551bcd13b34cdfb73fd8a379a677a5 is 50, key is test_row_0/C:col10/1732724618765/Put/seqid=0 2024-11-27T16:23:39,509 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 771007c821cc85e485653aceb22dba4b/A of 771007c821cc85e485653aceb22dba4b into 086e5fbcee744012880e1c9cb5dcd58c(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:39,509 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:39,509 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/A, priority=13, startTime=1732724619333; duration=0sec 2024-11-27T16:23:39,509 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:39,509 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:A 2024-11-27T16:23:39,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:39,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724679516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:39,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:39,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724679517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:39,533 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127ca3c36ee16aa4a04ad9035bbe29bdab1_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724619492/Put/seqid=0 2024-11-27T16:23:39,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742549_1725 (size=12697) 2024-11-27T16:23:39,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742550_1726 (size=17534) 2024-11-27T16:23:39,594 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:39,603 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127ca3c36ee16aa4a04ad9035bbe29bdab1_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127ca3c36ee16aa4a04ad9035bbe29bdab1_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:39,603 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/641461dfb3b74e5eb5a38137d18cac3e, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:39,604 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/641461dfb3b74e5eb5a38137d18cac3e is 175, key is test_row_0/A:col10/1732724619492/Put/seqid=0 2024-11-27T16:23:39,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-27T16:23:39,621 INFO [Thread-2863 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 
2024-11-27T16:23:39,623 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:39,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-27T16:23:39,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-27T16:23:39,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:39,624 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:39,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724679620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:39,625 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:39,625 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:39,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:39,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724679622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:39,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742551_1727 (size=48639) 2024-11-27T16:23:39,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-27T16:23:39,777 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:39,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-27T16:23:39,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:39,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:39,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:39,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:39,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:39,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:39,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:39,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724679828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:39,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:39,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724679827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:39,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-27T16:23:39,929 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:39,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-27T16:23:39,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:39,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:39,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:39,930 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:39,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:39,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:39,964 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/3f551bcd13b34cdfb73fd8a379a677a5 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/3f551bcd13b34cdfb73fd8a379a677a5 2024-11-27T16:23:39,970 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 771007c821cc85e485653aceb22dba4b/C of 771007c821cc85e485653aceb22dba4b into 3f551bcd13b34cdfb73fd8a379a677a5(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:39,970 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:39,970 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/C, priority=12, startTime=1732724619348; duration=0sec 2024-11-27T16:23:39,971 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:39,971 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:C 2024-11-27T16:23:40,046 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=275, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/641461dfb3b74e5eb5a38137d18cac3e 2024-11-27T16:23:40,058 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/15c405ff2fa84cffbb247bf59339b576 is 50, key is test_row_0/B:col10/1732724619492/Put/seqid=0 2024-11-27T16:23:40,081 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:40,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-27T16:23:40,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:40,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:40,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:40,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:40,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:40,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:40,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742552_1728 (size=12301)
2024-11-27T16:23:40,111 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/15c405ff2fa84cffbb247bf59339b576
2024-11-27T16:23:40,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/6807a17188454156994f7ec66616e590 is 50, key is test_row_0/C:col10/1732724619492/Put/seqid=0
2024-11-27T16:23:40,134 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:23:40,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724680134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
2024-11-27T16:23:40,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:23:40,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724680150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
2024-11-27T16:23:40,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742553_1729 (size=12301)
2024-11-27T16:23:40,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/6807a17188454156994f7ec66616e590
2024-11-27T16:23:40,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/641461dfb3b74e5eb5a38137d18cac3e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/641461dfb3b74e5eb5a38137d18cac3e
2024-11-27T16:23:40,193 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/641461dfb3b74e5eb5a38137d18cac3e, entries=250, sequenceid=275, filesize=47.5 K
2024-11-27T16:23:40,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/15c405ff2fa84cffbb247bf59339b576 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/15c405ff2fa84cffbb247bf59339b576
2024-11-27T16:23:40,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,197 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/15c405ff2fa84cffbb247bf59339b576, entries=150, sequenceid=275, filesize=12.0 K
2024-11-27T16:23:40,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/6807a17188454156994f7ec66616e590 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/6807a17188454156994f7ec66616e590
2024-11-27T16:23:40,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/6807a17188454156994f7ec66616e590, entries=150, sequenceid=275, filesize=12.0 K
2024-11-27T16:23:40,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 771007c821cc85e485653aceb22dba4b in 709ms, sequenceid=275, compaction requested=false
2024-11-27T16:23:40,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b:
2024-11-27T16:23:40,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,213 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,222 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-27T16:23:40,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,234 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967
2024-11-27T16:23:40,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,236 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166
2024-11-27T16:23:40,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.
2024-11-27T16:23:40,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,236 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB
2024-11-27T16:23:40,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A
2024-11-27T16:23:40,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:23:40,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B
2024-11-27T16:23:40,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:23:40,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C
2024-11-27T16:23:40,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T16:23:40,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127684c7031bc9e4f33b5aec3ac36b52d8d_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724619515/Put/seqid=0 2024-11-27T16:23:40,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
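[Editor's note] The handler threads above repeatedly log StoreFileTrackerFactory instantiating DefaultStoreFileTracker for this region's stores. As a hedged illustration only (not part of this test run), the tracker implementation is normally selected through the hbase.store.file-tracker.impl property when a table is created; the class name BusyTableSftSketch and the explicit "DEFAULT" value below are assumptions for the sketch, chosen to match the DefaultStoreFileTracker named in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Sketch only: create a table with its store file tracker pinned explicitly.
    public class BusyTableSftSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // "DEFAULT" maps to DefaultStoreFileTracker, the impl seen in the log;
              // "FILE" would instead persist the store file list in a manifest.
              .setValue("hbase.store.file-tracker.impl", "DEFAULT")
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
              .build());
        }
      }
    }

With the default tracker, store files are tracked purely through the file system layout (tmp-file commit plus rename), which matches the .tmp-to-store Committing/Added sequence visible earlier in this excerpt.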
2024-11-27T16:23:40,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
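[Editor's note] Earlier in this excerpt, handler threads reject Mutate calls with RegionTooBusyException while region 771007c821cc85e485653aceb22dba4b sits over its 512.0 K memstore blocking limit, and MemStoreFlusher.0 works the backlog down. A hedged client-side sketch follows; the class name and numeric values are illustrative assumptions, not settings taken from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch only: a writer that rides out RegionTooBusyException by giving the
    // client a larger retry budget; numbers are illustrative, not from this test.
    public class BusyRegionWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 15); // more attempts per operation
        conf.setLong("hbase.client.pause", 200);        // base backoff in ms
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          // The client retries internally while the region reports it is too busy;
          // only after the retry budget is exhausted does the put fail to the caller.
          table.put(put);
        }
      }
    }

Server-side, the rejection threshold is roughly hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the 512.0 K limit in this run presumably reflects a deliberately small flush size configured by the test.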
2024-11-27T16:23:40,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742554_1730 (size=9914) 2024-11-27T16:23:40,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:40,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:40,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
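
The records just above show the memstore flusher declining to flush region 771007c821cc85e485653aceb22dba4b because a flush of it is already in progress ("NOT flushing ... as already flushing"), while an RPC handler requests another flush as writes keep arriving. Those flushes are size-triggered by the test workload; for comparison only, the minimal sketch below shows how a flush of the same table could be requested explicitly through the public Admin API. The cluster connection setup is assumed rather than taken from this log, and how the server reconciles such a request with an in-flight flush is left to the region server.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
        public static void main(String[] args) throws Exception {
            // Configuration is read from hbase-site.xml on the classpath; the
            // cluster address is assumed, not taken from this log.
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Ask the region servers to flush the memstores of every region of
                // the table; how this interacts with a flush that is already running
                // is up to the server, as the "NOT flushing ... as already flushing"
                // DEBUG record above illustrates.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
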
2024-11-27T16:23:40,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
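
Each of the repeated "instantiating StoreFileTracker impl ... DefaultStoreFileTracker" records in this stretch is StoreFileTrackerFactory resolving the tracker for a store and falling back to the default, file-system-based implementation, since nothing else is configured for this table. As a hedged illustration only (the property name "hbase.store.file-tracker.impl" and the value "FILE" are assumptions about the store-file-tracking feature, not something this log confirms), a non-default tracker would typically be selected per table roughly like this:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class TrackerTableExample {
        public static TableDescriptor descriptor() {
            // "hbase.store.file-tracker.impl" and the value "FILE" are assumed names
            // for the store-file-tracker setting; the log above only shows the
            // default tracker being picked when nothing is configured on the table.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
                .setValue("hbase.store.file-tracker.impl", "FILE")
                .build();
        }
    }
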
2024-11-27T16:23:40,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T16:23:40,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:40,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
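
A few records further down, the same region starts rejecting writes with RegionTooBusyException ("Over memstore limit=512.0 K") once its memstore crosses the blocking limit configured for this run. The HBase client normally retries such calls internally; the loop below is only a rough, hand-rolled sketch of the same back-off idea for a caller that sees the exception directly. The table name, row key, and column names echo those visible in the log, while the value, retry count, and back-off numbers are purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);          // rejected with RegionTooBusyException under memstore pressure
                        break;
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 5) {
                            throw e;             // give up after a handful of attempts
                        }
                        Thread.sleep(backoffMs); // back off so the flush can catch up
                        backoffMs *= 2;          // simple exponential backoff
                    }
                }
            }
        }
    }
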
2024-11-27T16:23:40,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T16:23:40,706 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127684c7031bc9e4f33b5aec3ac36b52d8d_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127684c7031bc9e4f33b5aec3ac36b52d8d_771007c821cc85e485653aceb22dba4b
2024-11-27T16:23:40,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/72f4e26c83e7449492d48d7400a17e12, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b]
2024-11-27T16:23:40,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/72f4e26c83e7449492d48d7400a17e12 is 175, key is test_row_0/A:col10/1732724619515/Put/seqid=0
2024-11-27T16:23:40,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165
2024-11-27T16:23:40,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742555_1731 (size=22561)
2024-11-27T16:23:40,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:23:40,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724680741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
2024-11-27T16:23:40,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:23:40,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724680746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
2024-11-27T16:23:40,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:23:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724680848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
2024-11-27T16:23:40,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:23:40,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724680851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
2024-11-27T16:23:41,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-27T16:23:41,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724681053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
2024-11-27T16:23:41,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:41,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724681054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:41,134 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=287, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/72f4e26c83e7449492d48d7400a17e12 2024-11-27T16:23:41,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/7da9a7332db042acb5b9bd3828bedf1b is 50, key is test_row_0/B:col10/1732724619515/Put/seqid=0 2024-11-27T16:23:41,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742556_1732 (size=9857) 2024-11-27T16:23:41,194 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/7da9a7332db042acb5b9bd3828bedf1b 2024-11-27T16:23:41,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/647331dde44b47ef8f4165c366037971 is 50, key is test_row_0/C:col10/1732724619515/Put/seqid=0 2024-11-27T16:23:41,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742557_1733 (size=9857) 2024-11-27T16:23:41,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:41,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724681359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:41,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:41,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724681359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:41,644 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/647331dde44b47ef8f4165c366037971 2024-11-27T16:23:41,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/72f4e26c83e7449492d48d7400a17e12 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/72f4e26c83e7449492d48d7400a17e12 2024-11-27T16:23:41,653 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/72f4e26c83e7449492d48d7400a17e12, entries=100, sequenceid=287, filesize=22.0 K 2024-11-27T16:23:41,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/7da9a7332db042acb5b9bd3828bedf1b as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/7da9a7332db042acb5b9bd3828bedf1b 2024-11-27T16:23:41,658 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/7da9a7332db042acb5b9bd3828bedf1b, entries=100, sequenceid=287, filesize=9.6 K 2024-11-27T16:23:41,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/647331dde44b47ef8f4165c366037971 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/647331dde44b47ef8f4165c366037971 2024-11-27T16:23:41,664 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/647331dde44b47ef8f4165c366037971, entries=100, sequenceid=287, filesize=9.6 K 2024-11-27T16:23:41,665 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 771007c821cc85e485653aceb22dba4b in 1429ms, sequenceid=287, compaction requested=true 2024-11-27T16:23:41,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:41,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:41,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-27T16:23:41,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-27T16:23:41,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-27T16:23:41,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0450 sec 2024-11-27T16:23:41,673 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 2.0490 sec 2024-11-27T16:23:41,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-27T16:23:41,728 INFO [Thread-2863 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-27T16:23:41,729 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:41,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-27T16:23:41,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-27T16:23:41,731 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:41,732 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:41,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:41,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-27T16:23:41,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-27T16:23:41,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:41,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:41,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:41,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:41,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:41,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:41,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:41,883 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:41,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:41,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724681880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:41,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-27T16:23:41,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:41,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:41,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:41,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:41,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:41,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:41,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:41,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724681882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:41,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127f5a0755802e248b192d93b72ce6b0b33_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724621868/Put/seqid=0 2024-11-27T16:23:41,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742558_1734 (size=14994) 2024-11-27T16:23:41,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:41,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724681986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:41,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724681988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:42,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-27T16:23:42,036 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:42,037 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-27T16:23:42,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:42,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:42,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:42,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:42,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,189 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:42,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-27T16:23:42,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:42,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:42,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724682197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:42,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724682197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:42,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:42,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:42,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:42,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:42,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,332 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:42,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-27T16:23:42,337 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127f5a0755802e248b192d93b72ce6b0b33_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127f5a0755802e248b192d93b72ce6b0b33_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:42,339 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/a15a3764e81c4d0098b90cc62741a466, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:42,339 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/a15a3764e81c4d0098b90cc62741a466 is 175, key is test_row_0/A:col10/1732724621868/Put/seqid=0 2024-11-27T16:23:42,359 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:42,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-27T16:23:42,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:42,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:42,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:42,361 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742559_1735 (size=39949) 2024-11-27T16:23:42,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:42,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724682504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:42,514 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:42,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-27T16:23:42,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:42,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:42,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:42,516 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:42,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:42,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724682516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:42,668 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:42,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-27T16:23:42,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:42,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:42,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:42,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,772 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=315, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/a15a3764e81c4d0098b90cc62741a466 2024-11-27T16:23:42,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/a16323b3417c4865a927263cb158b876 is 50, key is test_row_0/B:col10/1732724621868/Put/seqid=0 2024-11-27T16:23:42,821 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:42,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-27T16:23:42,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:42,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:42,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:42,822 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742560_1736 (size=12301) 2024-11-27T16:23:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-27T16:23:42,984 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:42,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-27T16:23:42,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:42,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:42,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:42,992 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:42,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:43,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:43,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724683012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:43,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:43,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724683023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:43,144 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:43,147 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-27T16:23:43,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:43,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:43,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:43,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:43,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:43,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:43,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/a16323b3417c4865a927263cb158b876 2024-11-27T16:23:43,251 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/ca24ad61ede74714a8c6dbdfcfc213f4 is 50, key is test_row_0/C:col10/1732724621868/Put/seqid=0 2024-11-27T16:23:43,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742561_1737 (size=12301) 2024-11-27T16:23:43,299 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:43,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-27T16:23:43,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:43,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:43,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:43,300 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:43,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:43,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:43,452 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:43,453 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-27T16:23:43,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:43,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:43,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:43,453 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:43,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:43,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:43,606 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:43,606 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-27T16:23:43,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:43,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:43,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:43,606 ERROR [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T16:23:43,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:43,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T16:23:43,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/ca24ad61ede74714a8c6dbdfcfc213f4 2024-11-27T16:23:43,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/a15a3764e81c4d0098b90cc62741a466 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/a15a3764e81c4d0098b90cc62741a466 2024-11-27T16:23:43,685 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/a15a3764e81c4d0098b90cc62741a466, entries=200, sequenceid=315, filesize=39.0 K 2024-11-27T16:23:43,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/a16323b3417c4865a927263cb158b876 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a16323b3417c4865a927263cb158b876 2024-11-27T16:23:43,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a16323b3417c4865a927263cb158b876, entries=150, sequenceid=315, filesize=12.0 K 2024-11-27T16:23:43,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/ca24ad61ede74714a8c6dbdfcfc213f4 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/ca24ad61ede74714a8c6dbdfcfc213f4 2024-11-27T16:23:43,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/ca24ad61ede74714a8c6dbdfcfc213f4, entries=150, sequenceid=315, filesize=12.0 K 2024-11-27T16:23:43,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 771007c821cc85e485653aceb22dba4b in 1827ms, sequenceid=315, compaction requested=true 2024-11-27T16:23:43,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:43,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:43,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:43,696 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:43,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:43,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:43,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:43,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:23:43,696 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:43,697 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142834 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:43,697 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/A is initiating minor compaction (all files) 2024-11-27T16:23:43,697 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/A in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:43,697 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/086e5fbcee744012880e1c9cb5dcd58c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/641461dfb3b74e5eb5a38137d18cac3e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/72f4e26c83e7449492d48d7400a17e12, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/a15a3764e81c4d0098b90cc62741a466] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=139.5 K 2024-11-27T16:23:43,697 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:43,697 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/086e5fbcee744012880e1c9cb5dcd58c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/641461dfb3b74e5eb5a38137d18cac3e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/72f4e26c83e7449492d48d7400a17e12, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/a15a3764e81c4d0098b90cc62741a466] 2024-11-27T16:23:43,698 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47190 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:43,698 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/B is initiating minor compaction (all files) 2024-11-27T16:23:43,698 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/B in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:43,698 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5df8f7b1208d4435a6df8a9353d63168, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/15c405ff2fa84cffbb247bf59339b576, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/7da9a7332db042acb5b9bd3828bedf1b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a16323b3417c4865a927263cb158b876] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=46.1 K 2024-11-27T16:23:43,699 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 086e5fbcee744012880e1c9cb5dcd58c, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732724617624 2024-11-27T16:23:43,699 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 5df8f7b1208d4435a6df8a9353d63168, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732724617624 2024-11-27T16:23:43,700 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 641461dfb3b74e5eb5a38137d18cac3e, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732724618839 2024-11-27T16:23:43,700 DEBUG 
[RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 15c405ff2fa84cffbb247bf59339b576, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732724618839 2024-11-27T16:23:43,700 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72f4e26c83e7449492d48d7400a17e12, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732724619513 2024-11-27T16:23:43,700 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting a15a3764e81c4d0098b90cc62741a466, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732724620737 2024-11-27T16:23:43,701 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 7da9a7332db042acb5b9bd3828bedf1b, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732724619513 2024-11-27T16:23:43,701 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting a16323b3417c4865a927263cb158b876, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732724620737 2024-11-27T16:23:43,709 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:43,728 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#B#compaction#629 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:43,729 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/d4445e0d37604ee398b6f41675091827 is 50, key is test_row_0/B:col10/1732724621868/Put/seqid=0 2024-11-27T16:23:43,732 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112707031fa946614db08d591eefb1ff51da_771007c821cc85e485653aceb22dba4b store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:43,735 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112707031fa946614db08d591eefb1ff51da_771007c821cc85e485653aceb22dba4b, store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:43,735 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112707031fa946614db08d591eefb1ff51da_771007c821cc85e485653aceb22dba4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:43,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742562_1738 (size=13017) 2024-11-27T16:23:43,759 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:43,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-27T16:23:43,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:43,764 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-27T16:23:43,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:43,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:43,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:43,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:43,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:43,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:43,772 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/d4445e0d37604ee398b6f41675091827 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/d4445e0d37604ee398b6f41675091827 2024-11-27T16:23:43,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742563_1739 (size=4469) 2024-11-27T16:23:43,777 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 771007c821cc85e485653aceb22dba4b/B of 771007c821cc85e485653aceb22dba4b into d4445e0d37604ee398b6f41675091827(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:23:43,777 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:43,777 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/B, priority=12, startTime=1732724623696; duration=0sec 2024-11-27T16:23:43,778 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:43,778 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:B 2024-11-27T16:23:43,778 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T16:23:43,781 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#A#compaction#628 average throughput is 0.34 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:43,782 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/1e33543b4c6847a2af0b6229f9d12100 is 175, key is test_row_0/A:col10/1732724621868/Put/seqid=0 2024-11-27T16:23:43,783 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47156 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T16:23:43,783 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/C is initiating minor compaction (all files) 2024-11-27T16:23:43,783 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/C in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:43,784 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/3f551bcd13b34cdfb73fd8a379a677a5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/6807a17188454156994f7ec66616e590, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/647331dde44b47ef8f4165c366037971, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/ca24ad61ede74714a8c6dbdfcfc213f4] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=46.1 K 2024-11-27T16:23:43,787 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f551bcd13b34cdfb73fd8a379a677a5, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732724617624 2024-11-27T16:23:43,787 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 6807a17188454156994f7ec66616e590, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732724618839 2024-11-27T16:23:43,787 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 647331dde44b47ef8f4165c366037971, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732724619513 2024-11-27T16:23:43,788 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting ca24ad61ede74714a8c6dbdfcfc213f4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732724620737 2024-11-27T16:23:43,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127fe0e3dfc262f4486a086370f9ccc259b_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724621878/Put/seqid=0 2024-11-27T16:23:43,800 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#C#compaction#631 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:43,803 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/1ca077f1e2af410fa3dbabf635a6b0f7 is 50, key is test_row_0/C:col10/1732724621868/Put/seqid=0 2024-11-27T16:23:43,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742564_1740 (size=31971) 2024-11-27T16:23:43,837 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/1e33543b4c6847a2af0b6229f9d12100 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/1e33543b4c6847a2af0b6229f9d12100 2024-11-27T16:23:43,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-27T16:23:43,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742565_1741 (size=12454) 2024-11-27T16:23:43,846 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 771007c821cc85e485653aceb22dba4b/A of 771007c821cc85e485653aceb22dba4b into 1e33543b4c6847a2af0b6229f9d12100(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:23:43,846 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:43,846 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/A, priority=12, startTime=1732724623695; duration=0sec 2024-11-27T16:23:43,846 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:43,846 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:A 2024-11-27T16:23:43,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742566_1742 (size=12983) 2024-11-27T16:23:43,875 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/1ca077f1e2af410fa3dbabf635a6b0f7 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/1ca077f1e2af410fa3dbabf635a6b0f7 2024-11-27T16:23:43,880 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 771007c821cc85e485653aceb22dba4b/C of 771007c821cc85e485653aceb22dba4b into 1ca077f1e2af410fa3dbabf635a6b0f7(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:43,880 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:43,880 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/C, priority=12, startTime=1732724623696; duration=0sec 2024-11-27T16:23:43,881 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:43,881 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:C 2024-11-27T16:23:44,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. as already flushing 2024-11-27T16:23:44,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:44,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:44,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724684104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:44,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:44,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724684107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:44,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:44,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724684209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:44,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:44,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724684216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:44,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:44,248 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127fe0e3dfc262f4486a086370f9ccc259b_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127fe0e3dfc262f4486a086370f9ccc259b_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:44,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/eb129477e78e460dad8355f263f9816f, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:44,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/eb129477e78e460dad8355f263f9816f is 175, key is test_row_0/A:col10/1732724621878/Put/seqid=0 2024-11-27T16:23:44,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742567_1743 (size=31255) 2024-11-27T16:23:44,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:44,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724684414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:44,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:44,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724684433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:44,684 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=323, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/eb129477e78e460dad8355f263f9816f 2024-11-27T16:23:44,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/5421e2ad3b43414292038387b23d9297 is 50, key is test_row_0/B:col10/1732724621878/Put/seqid=0 2024-11-27T16:23:44,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:44,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724684723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:44,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742568_1744 (size=12301) 2024-11-27T16:23:44,740 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/5421e2ad3b43414292038387b23d9297 2024-11-27T16:23:44,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:44,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724684741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:44,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/252a4daa85e74c329ad71533894d5298 is 50, key is test_row_0/C:col10/1732724621878/Put/seqid=0 2024-11-27T16:23:44,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742569_1745 (size=12301) 2024-11-27T16:23:44,808 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/252a4daa85e74c329ad71533894d5298 2024-11-27T16:23:44,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/eb129477e78e460dad8355f263f9816f as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/eb129477e78e460dad8355f263f9816f 2024-11-27T16:23:44,822 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/eb129477e78e460dad8355f263f9816f, entries=150, sequenceid=323, filesize=30.5 K 2024-11-27T16:23:44,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/5421e2ad3b43414292038387b23d9297 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5421e2ad3b43414292038387b23d9297 2024-11-27T16:23:44,826 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 
{event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5421e2ad3b43414292038387b23d9297, entries=150, sequenceid=323, filesize=12.0 K 2024-11-27T16:23:44,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/252a4daa85e74c329ad71533894d5298 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/252a4daa85e74c329ad71533894d5298 2024-11-27T16:23:44,831 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/252a4daa85e74c329ad71533894d5298, entries=150, sequenceid=323, filesize=12.0 K 2024-11-27T16:23:44,832 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 771007c821cc85e485653aceb22dba4b in 1068ms, sequenceid=323, compaction requested=false 2024-11-27T16:23:44,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:44,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:44,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-27T16:23:44,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-27T16:23:44,835 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-27T16:23:44,835 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1010 sec 2024-11-27T16:23:44,837 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 3.1070 sec 2024-11-27T16:23:45,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-27T16:23:45,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:45,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:45,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:45,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:45,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:45,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:45,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(8581): Flush requested on 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:45,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:45,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724685255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:45,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112739e7121eea6543ba9eec09a72290f354_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724624097/Put/seqid=0 2024-11-27T16:23:45,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:45,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49390 deadline: 1732724685260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:45,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742570_1746 (size=14994) 2024-11-27T16:23:45,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:45,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724685366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:45,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:45,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724685570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:45,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:45,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1732724685620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:45,622 DEBUG [Thread-2861 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18165 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:23:45,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:45,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49412 deadline: 1732724685631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:45,635 DEBUG [Thread-2853 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18181 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:23:45,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:45,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49348 deadline: 1732724685683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:45,687 DEBUG [Thread-2857 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18231 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., hostname=7b191dec6496,44169,1732724452967, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T16:23:45,727 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:45,732 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112739e7121eea6543ba9eec09a72290f354_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112739e7121eea6543ba9eec09a72290f354_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:45,733 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/3fa25bc9553e465aa8a647df29ce0db8, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:45,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/3fa25bc9553e465aa8a647df29ce0db8 is 175, key is test_row_0/A:col10/1732724624097/Put/seqid=0 2024-11-27T16:23:45,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742571_1747 (size=39949) 2024-11-27T16:23:45,780 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=355, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/3fa25bc9553e465aa8a647df29ce0db8 2024-11-27T16:23:45,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/1643bc3e8acb4886ba8b50784fec9fc1 is 50, key is test_row_0/B:col10/1732724624097/Put/seqid=0 2024-11-27T16:23:45,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742572_1748 (size=12301) 2024-11-27T16:23:45,826 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/1643bc3e8acb4886ba8b50784fec9fc1 2024-11-27T16:23:45,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/2dfaf41f009c48c9b0520c21d13a4a47 is 50, key is test_row_0/C:col10/1732724624097/Put/seqid=0 2024-11-27T16:23:45,842 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-27T16:23:45,842 INFO [Thread-2863 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-27T16:23:45,843 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T16:23:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-27T16:23:45,845 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T16:23:45,846 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T16:23:45,846 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T16:23:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-27T16:23:45,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742573_1749 (size=12301) 2024-11-27T16:23:45,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T16:23:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44169 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49368 deadline: 1732724685872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 2024-11-27T16:23:45,884 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/2dfaf41f009c48c9b0520c21d13a4a47 2024-11-27T16:23:45,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/3fa25bc9553e465aa8a647df29ce0db8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/3fa25bc9553e465aa8a647df29ce0db8 2024-11-27T16:23:45,896 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/3fa25bc9553e465aa8a647df29ce0db8, entries=200, sequenceid=355, filesize=39.0 K 2024-11-27T16:23:45,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/1643bc3e8acb4886ba8b50784fec9fc1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/1643bc3e8acb4886ba8b50784fec9fc1 2024-11-27T16:23:45,901 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/1643bc3e8acb4886ba8b50784fec9fc1, entries=150, sequenceid=355, filesize=12.0 K 2024-11-27T16:23:45,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/2dfaf41f009c48c9b0520c21d13a4a47 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/2dfaf41f009c48c9b0520c21d13a4a47 2024-11-27T16:23:45,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/2dfaf41f009c48c9b0520c21d13a4a47, entries=150, sequenceid=355, filesize=12.0 K 2024-11-27T16:23:45,907 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 771007c821cc85e485653aceb22dba4b in 673ms, sequenceid=355, compaction requested=true 2024-11-27T16:23:45,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:45,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T16:23:45,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:45,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T16:23:45,907 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:45,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:45,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 771007c821cc85e485653aceb22dba4b:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T16:23:45,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T16:23:45,907 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:45,908 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103175 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:45,908 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/A is initiating minor compaction (all files) 2024-11-27T16:23:45,908 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/A in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
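The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server refusing writes while the memstore of region 771007c821cc85e485653aceb22dba4b sits above its blocking limit, and the RpcRetryingCallerImpl messages ("tries=8, retries=16") show the client quietly retrying the same Put until the MemStoreFlusher thread frees space (the 673 ms flush logged just above). A minimal client-side sketch of the knobs involved, assuming the stock hbase-client API; the property names are standard HBase keys, the sizes and counts are illustrative only, and the region-server keys normally live in the cluster's hbase-site.xml (they are set on a Configuration object here purely to name them):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressureSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Server-side keys (normally in hbase-site.xml): a region blocks updates
    // with RegionTooBusyException once its memstore exceeds roughly
    // flush.size * block.multiplier. Values here are illustrative defaults,
    // not the deliberately tiny limit used by this test.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    // Client-side retry budget used by RpcRetryingCallerImpl
    // (the "tries=8, retries=16" counters in the log above).
    conf.setInt("hbase.client.retries.number", 16);
    conf.setInt("hbase.client.pause", 100);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // put() retries transparently inside the client; RegionTooBusyException
      // only reaches the caller once the retry budget is exhausted.
      table.put(put);
    }
  }
}
```

In this test the blocking limit is intentionally small (512 K), so writers stall for several retry rounds until the flush completes, which is the expected back-pressure behaviour rather than a failure.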
2024-11-27T16:23:45,908 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/1e33543b4c6847a2af0b6229f9d12100, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/eb129477e78e460dad8355f263f9816f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/3fa25bc9553e465aa8a647df29ce0db8] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=100.8 K 2024-11-27T16:23:45,908 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:45,908 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. files: [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/1e33543b4c6847a2af0b6229f9d12100, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/eb129477e78e460dad8355f263f9816f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/3fa25bc9553e465aa8a647df29ce0db8] 2024-11-27T16:23:45,909 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:45,909 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/B is initiating minor compaction (all files) 2024-11-27T16:23:45,909 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 771007c821cc85e485653aceb22dba4b/B in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
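Once the flush adds a third HFile to each store, SortedCompactionPolicy/ExploringCompactionPolicy select all three eligible files for a minor compaction of stores A and B, as logged above; store A additionally goes through DefaultMobStoreCompactor because it is MOB-enabled in this test. A short sketch of how the same work can be requested explicitly through the Admin API, assuming hbase-client on the classpath; the threshold key is a standard HBase property and the value shown simply mirrors the common default of 3:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is
    // considered; the selection above fires with exactly 3 files.
    // (Normally configured in hbase-site.xml; named here for reference only.)
    conf.setInt("hbase.hstore.compactionThreshold", 3);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Queue a minor compaction of one column family, analogous to the
      // system-requested compaction of stores A and B seen in the log.
      admin.compact(table, Bytes.toBytes("A"));
      // A major compaction would rewrite all store files of the family:
      // admin.majorCompact(table, Bytes.toBytes("A"));
    }
  }
}
```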
2024-11-27T16:23:45,909 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/d4445e0d37604ee398b6f41675091827, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5421e2ad3b43414292038387b23d9297, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/1643bc3e8acb4886ba8b50784fec9fc1] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=36.7 K 2024-11-27T16:23:45,909 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e33543b4c6847a2af0b6229f9d12100, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732724620737 2024-11-27T16:23:45,909 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4445e0d37604ee398b6f41675091827, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732724620737 2024-11-27T16:23:45,910 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting eb129477e78e460dad8355f263f9816f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732724621871 2024-11-27T16:23:45,910 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5421e2ad3b43414292038387b23d9297, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732724621871 2024-11-27T16:23:45,910 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fa25bc9553e465aa8a647df29ce0db8, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732724624078 2024-11-27T16:23:45,911 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1643bc3e8acb4886ba8b50784fec9fc1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732724624097 2024-11-27T16:23:45,926 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#B#compaction#637 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:45,926 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/2420c1a2d94349e885b8b53a3999a2be is 50, key is test_row_0/B:col10/1732724624097/Put/seqid=0 2024-11-27T16:23:45,944 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:45,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-27T16:23:45,956 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112731d3827bea8f429db5521fc696c524c0_771007c821cc85e485653aceb22dba4b store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:45,959 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112731d3827bea8f429db5521fc696c524c0_771007c821cc85e485653aceb22dba4b, store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:45,959 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112731d3827bea8f429db5521fc696c524c0_771007c821cc85e485653aceb22dba4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:45,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742574_1750 (size=13119) 2024-11-27T16:23:45,991 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/2420c1a2d94349e885b8b53a3999a2be as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/2420c1a2d94349e885b8b53a3999a2be 2024-11-27T16:23:45,998 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:45,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44169 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-27T16:23:45,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
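The pid=169/170 flush follows the same path as the earlier pid=167/168 cycle: a client flush request reaches HMaster, the master stores a FlushTableProcedure, fans it out as per-region FlushRegionProcedure subprocedures, and the region server executes each one as a FlushRegionCallable. A minimal sketch of the client call that starts this chain, matching the HBaseAdmin flush operation logged above for default:TestAcidGuarantees; connection settings are assumed to come from the hbase-site.xml on the classpath:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Table flush: the master runs a FlushTableProcedure and dispatches
      // per-region FlushRegionProcedure subprocedures, which the region
      // server executes as FlushRegionCallable (pid=169/170 in this log).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

As the "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed" line above indicates, the client waits until the master reports the procedure finished before returning.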
2024-11-27T16:23:46,000 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-27T16:23:46,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:46,002 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 771007c821cc85e485653aceb22dba4b/B of 771007c821cc85e485653aceb22dba4b into 2420c1a2d94349e885b8b53a3999a2be(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T16:23:46,002 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:46,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:46,002 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/B, priority=13, startTime=1732724625907; duration=0sec 2024-11-27T16:23:46,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:46,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:46,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:46,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:46,002 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T16:23:46,002 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:B 2024-11-27T16:23:46,002 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T16:23:46,004 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T16:23:46,004 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1540): 771007c821cc85e485653aceb22dba4b/C is initiating minor compaction (all files) 2024-11-27T16:23:46,004 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2351): Starting 
compaction of 771007c821cc85e485653aceb22dba4b/C in TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:46,004 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/1ca077f1e2af410fa3dbabf635a6b0f7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/252a4daa85e74c329ad71533894d5298, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/2dfaf41f009c48c9b0520c21d13a4a47] into tmpdir=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp, totalSize=36.7 K 2024-11-27T16:23:46,004 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ca077f1e2af410fa3dbabf635a6b0f7, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732724620737 2024-11-27T16:23:46,005 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 252a4daa85e74c329ad71533894d5298, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732724621871 2024-11-27T16:23:46,005 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2dfaf41f009c48c9b0520c21d13a4a47, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732724624097 2024-11-27T16:23:46,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742575_1751 (size=4469) 2024-11-27T16:23:46,024 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#C#compaction#639 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:46,024 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/bc7e8946091e4fd3b536f1ee42e3908c is 50, key is test_row_0/C:col10/1732724624097/Put/seqid=0 2024-11-27T16:23:46,025 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 771007c821cc85e485653aceb22dba4b#A#compaction#638 average throughput is 0.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T16:23:46,025 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/716d25e8896c4ff6b81e1150d28f004e is 175, key is test_row_0/A:col10/1732724624097/Put/seqid=0 2024-11-27T16:23:46,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a5559ade3a18460a9f83f83f69396855_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724625248/Put/seqid=0 2024-11-27T16:23:46,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742576_1752 (size=13085) 2024-11-27T16:23:46,083 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/bc7e8946091e4fd3b536f1ee42e3908c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/bc7e8946091e4fd3b536f1ee42e3908c 2024-11-27T16:23:46,089 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 771007c821cc85e485653aceb22dba4b/C of 771007c821cc85e485653aceb22dba4b into bc7e8946091e4fd3b536f1ee42e3908c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:23:46,089 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:46,089 INFO [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/C, priority=13, startTime=1732724625907; duration=0sec 2024-11-27T16:23:46,089 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:46,089 DEBUG [RS:0;7b191dec6496:44169-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:C 2024-11-27T16:23:46,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742577_1753 (size=32073) 2024-11-27T16:23:46,096 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/716d25e8896c4ff6b81e1150d28f004e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/716d25e8896c4ff6b81e1150d28f004e 2024-11-27T16:23:46,100 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 771007c821cc85e485653aceb22dba4b/A of 771007c821cc85e485653aceb22dba4b into 716d25e8896c4ff6b81e1150d28f004e(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T16:23:46,100 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:46,100 INFO [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b., storeName=771007c821cc85e485653aceb22dba4b/A, priority=13, startTime=1732724625907; duration=0sec 2024-11-27T16:23:46,100 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T16:23:46,100 DEBUG [RS:0;7b191dec6496:44169-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 771007c821cc85e485653aceb22dba4b:A 2024-11-27T16:23:46,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742578_1754 (size=12454) 2024-11-27T16:23:46,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-27T16:23:46,270 DEBUG [Thread-2864 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x205568ef to 127.0.0.1:51088 2024-11-27T16:23:46,270 DEBUG [Thread-2868 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x675cc1c7 to 127.0.0.1:51088 2024-11-27T16:23:46,270 DEBUG [Thread-2864 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:46,270 DEBUG [Thread-2868 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:46,271 DEBUG [Thread-2855 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x031adbce to 127.0.0.1:51088 2024-11-27T16:23:46,271 DEBUG [Thread-2855 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:46,273 DEBUG [Thread-2870 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x73e9c98b to 127.0.0.1:51088 2024-11-27T16:23:46,273 DEBUG [Thread-2872 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60507b8f to 127.0.0.1:51088 2024-11-27T16:23:46,273 DEBUG [Thread-2872 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:46,273 DEBUG [Thread-2870 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:46,277 DEBUG [Thread-2866 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c6fde8c to 127.0.0.1:51088 2024-11-27T16:23:46,277 DEBUG [Thread-2866 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:46,376 DEBUG [Thread-2859 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x574dd3aa to 127.0.0.1:51088 2024-11-27T16:23:46,376 DEBUG [Thread-2859 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:46,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-27T16:23:46,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:46,525 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a5559ade3a18460a9f83f83f69396855_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a5559ade3a18460a9f83f83f69396855_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:46,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/adbd4fcb7c644d92a9690f47d2a16c0c, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:46,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/adbd4fcb7c644d92a9690f47d2a16c0c is 175, key is test_row_0/A:col10/1732724625248/Put/seqid=0 2024-11-27T16:23:46,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742579_1755 (size=31255) 2024-11-27T16:23:46,931 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=363, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/adbd4fcb7c644d92a9690f47d2a16c0c 2024-11-27T16:23:46,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/1ac7319ba0fc41f7afbfae6b771623e3 is 50, key is test_row_0/B:col10/1732724625248/Put/seqid=0 2024-11-27T16:23:46,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742580_1756 (size=12301) 2024-11-27T16:23:46,958 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/1ac7319ba0fc41f7afbfae6b771623e3 2024-11-27T16:23:46,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-27T16:23:46,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/63ad31bd244b47d5b111a6afdb662f60 is 50, key is test_row_0/C:col10/1732724625248/Put/seqid=0 
2024-11-27T16:23:46,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742581_1757 (size=12301) 2024-11-27T16:23:47,390 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/63ad31bd244b47d5b111a6afdb662f60 2024-11-27T16:23:47,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/adbd4fcb7c644d92a9690f47d2a16c0c as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/adbd4fcb7c644d92a9690f47d2a16c0c 2024-11-27T16:23:47,397 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/adbd4fcb7c644d92a9690f47d2a16c0c, entries=150, sequenceid=363, filesize=30.5 K 2024-11-27T16:23:47,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/1ac7319ba0fc41f7afbfae6b771623e3 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/1ac7319ba0fc41f7afbfae6b771623e3 2024-11-27T16:23:47,402 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/1ac7319ba0fc41f7afbfae6b771623e3, entries=150, sequenceid=363, filesize=12.0 K 2024-11-27T16:23:47,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/63ad31bd244b47d5b111a6afdb662f60 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/63ad31bd244b47d5b111a6afdb662f60 2024-11-27T16:23:47,406 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/63ad31bd244b47d5b111a6afdb662f60, entries=150, sequenceid=363, filesize=12.0 K 2024-11-27T16:23:47,407 INFO [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] 
regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=13.42 KB/13740 for 771007c821cc85e485653aceb22dba4b in 1407ms, sequenceid=363, compaction requested=false 2024-11-27T16:23:47,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:47,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:47,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b191dec6496:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-27T16:23:47,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-27T16:23:47,410 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-27T16:23:47,410 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5620 sec 2024-11-27T16:23:47,411 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 1.5670 sec 2024-11-27T16:23:47,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-27T16:23:47,963 INFO [Thread-2863 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-27T16:23:51,293 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-27T16:23:55,628 DEBUG [Thread-2861 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x29247c18 to 127.0.0.1:51088 2024-11-27T16:23:55,628 DEBUG [Thread-2861 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:55,692 DEBUG [Thread-2857 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4276b1e9 to 127.0.0.1:51088 2024-11-27T16:23:55,692 DEBUG [Thread-2857 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:55,697 DEBUG [Thread-2853 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x095ad211 to 127.0.0.1:51088 2024-11-27T16:23:55,697 DEBUG [Thread-2853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:55,697 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-27T16:23:55,697 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 4 2024-11-27T16:23:55,697 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 122 2024-11-27T16:23:55,697 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-11-27T16:23:55,697 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 134 2024-11-27T16:23:55,698 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 8 2024-11-27T16:23:55,698 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-27T16:23:55,698 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4532 2024-11-27T16:23:55,698 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4422 2024-11-27T16:23:55,698 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4423 2024-11-27T16:23:55,698 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4544 2024-11-27T16:23:55,698 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4393 2024-11-27T16:23:55,698 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-27T16:23:55,698 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-27T16:23:55,698 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x366de26d to 127.0.0.1:51088 2024-11-27T16:23:55,698 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:55,698 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-27T16:23:55,698 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-27T16:23:55,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:55,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-27T16:23:55,701 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724635701"}]},"ts":"1732724635701"} 2024-11-27T16:23:55,701 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-27T16:23:55,704 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-27T16:23:55,704 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T16:23:55,705 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=173, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=771007c821cc85e485653aceb22dba4b, UNASSIGN}] 2024-11-27T16:23:55,705 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=173, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=771007c821cc85e485653aceb22dba4b, UNASSIGN 2024-11-27T16:23:55,706 INFO [PEWorker-3 
{}] assignment.RegionStateStore(202): pid=173 updating hbase:meta row=771007c821cc85e485653aceb22dba4b, regionState=CLOSING, regionLocation=7b191dec6496,44169,1732724452967 2024-11-27T16:23:55,707 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T16:23:55,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; CloseRegionProcedure 771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967}] 2024-11-27T16:23:55,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-27T16:23:55,858 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7b191dec6496,44169,1732724452967 2024-11-27T16:23:55,858 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] handler.UnassignRegionHandler(124): Close 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:55,858 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T16:23:55,858 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1681): Closing 771007c821cc85e485653aceb22dba4b, disabling compactions & flushes 2024-11-27T16:23:55,858 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:55,858 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 2024-11-27T16:23:55,858 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. after waiting 0 ms 2024-11-27T16:23:55,858 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
2024-11-27T16:23:55,859 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(2837): Flushing 771007c821cc85e485653aceb22dba4b 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-27T16:23:55,859 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=A 2024-11-27T16:23:55,859 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:55,859 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=B 2024-11-27T16:23:55,859 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:55,859 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 771007c821cc85e485653aceb22dba4b, store=C 2024-11-27T16:23:55,859 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T16:23:55,863 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127fe5eb693da834dc7a15bc0567070e659_771007c821cc85e485653aceb22dba4b is 50, key is test_row_0/A:col10/1732724635627/Put/seqid=0 2024-11-27T16:23:55,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742582_1758 (size=12454) 2024-11-27T16:23:56,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-27T16:23:56,267 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T16:23:56,270 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127fe5eb693da834dc7a15bc0567070e659_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127fe5eb693da834dc7a15bc0567070e659_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:56,271 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/6d5cd9f7237641f0a69b927646687fa1, store: [table=TestAcidGuarantees family=A region=771007c821cc85e485653aceb22dba4b] 2024-11-27T16:23:56,271 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/6d5cd9f7237641f0a69b927646687fa1 is 175, key is test_row_0/A:col10/1732724635627/Put/seqid=0 2024-11-27T16:23:56,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742583_1759 (size=31255) 2024-11-27T16:23:56,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-27T16:23:56,675 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=373, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/6d5cd9f7237641f0a69b927646687fa1 2024-11-27T16:23:56,679 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/a2eefdd3272d4c92b832305ecab3d0b8 is 50, key is test_row_0/B:col10/1732724635627/Put/seqid=0 2024-11-27T16:23:56,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742584_1760 (size=12301) 2024-11-27T16:23:56,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-27T16:23:57,083 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/a2eefdd3272d4c92b832305ecab3d0b8 2024-11-27T16:23:57,087 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/44dc38ac9aab4ce6ae59d2141ef73890 is 50, key is test_row_0/C:col10/1732724635627/Put/seqid=0 2024-11-27T16:23:57,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742585_1761 (size=12301) 2024-11-27T16:23:57,491 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=373 (bloomFilter=true), 
to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/44dc38ac9aab4ce6ae59d2141ef73890 2024-11-27T16:23:57,494 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/A/6d5cd9f7237641f0a69b927646687fa1 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/6d5cd9f7237641f0a69b927646687fa1 2024-11-27T16:23:57,496 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/6d5cd9f7237641f0a69b927646687fa1, entries=150, sequenceid=373, filesize=30.5 K 2024-11-27T16:23:57,497 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/B/a2eefdd3272d4c92b832305ecab3d0b8 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a2eefdd3272d4c92b832305ecab3d0b8 2024-11-27T16:23:57,499 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a2eefdd3272d4c92b832305ecab3d0b8, entries=150, sequenceid=373, filesize=12.0 K 2024-11-27T16:23:57,500 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/.tmp/C/44dc38ac9aab4ce6ae59d2141ef73890 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/44dc38ac9aab4ce6ae59d2141ef73890 2024-11-27T16:23:57,502 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/44dc38ac9aab4ce6ae59d2141ef73890, entries=150, sequenceid=373, filesize=12.0 K 2024-11-27T16:23:57,503 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 771007c821cc85e485653aceb22dba4b in 1645ms, sequenceid=373, compaction requested=true 2024-11-27T16:23:57,503 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/c769303f10bd41c8adb8f7cb97f451c2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/6a97e462c4ff4afd8c0fe7d1a7158a95, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/f70e6599e09c4af584ccd96b67e646ea, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/50ebfba186954f0fae62d8a5193b4572, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/98f17541f0de471fb05d373c3358e32e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/68bc21f36f5848f98db23990c6183ff4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/ef4d3aaa2d444600ab42e28791e7e8f0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/5223feaf019f4799897f1ee64bd71cd6, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/df89bfcd088e4a5c9dea8da7df5be53a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/e363b486582e489f8a5d709690f703a8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/96ecebd0336f4b52b413b679253e7892, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/ec1429db38d946f68b9480a5c60509c8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/93c25eecab824b60ac3606eb0fa49336, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/8dc9dbb471914717a1ebfdd9f5cd5e28, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/2b036d1d0521450187f99bf2a2fdb1fe, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/053d7c73f999431fb8543d70083b0104, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/086e5fbcee744012880e1c9cb5dcd58c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/728684a847e34415ae1e112b3a970fbf, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/641461dfb3b74e5eb5a38137d18cac3e, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/72f4e26c83e7449492d48d7400a17e12, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/a15a3764e81c4d0098b90cc62741a466, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/1e33543b4c6847a2af0b6229f9d12100, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/eb129477e78e460dad8355f263f9816f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/3fa25bc9553e465aa8a647df29ce0db8] to archive 2024-11-27T16:23:57,504 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T16:23:57,505 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/c769303f10bd41c8adb8f7cb97f451c2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/c769303f10bd41c8adb8f7cb97f451c2 2024-11-27T16:23:57,506 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/6a97e462c4ff4afd8c0fe7d1a7158a95 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/6a97e462c4ff4afd8c0fe7d1a7158a95 2024-11-27T16:23:57,507 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/f70e6599e09c4af584ccd96b67e646ea to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/f70e6599e09c4af584ccd96b67e646ea 2024-11-27T16:23:57,507 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/50ebfba186954f0fae62d8a5193b4572 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/50ebfba186954f0fae62d8a5193b4572 2024-11-27T16:23:57,508 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/98f17541f0de471fb05d373c3358e32e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/98f17541f0de471fb05d373c3358e32e 2024-11-27T16:23:57,509 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/68bc21f36f5848f98db23990c6183ff4 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/68bc21f36f5848f98db23990c6183ff4 2024-11-27T16:23:57,510 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/ef4d3aaa2d444600ab42e28791e7e8f0 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/ef4d3aaa2d444600ab42e28791e7e8f0 2024-11-27T16:23:57,511 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/5223feaf019f4799897f1ee64bd71cd6 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/5223feaf019f4799897f1ee64bd71cd6 2024-11-27T16:23:57,511 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/df89bfcd088e4a5c9dea8da7df5be53a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/df89bfcd088e4a5c9dea8da7df5be53a 2024-11-27T16:23:57,512 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/e363b486582e489f8a5d709690f703a8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/e363b486582e489f8a5d709690f703a8 2024-11-27T16:23:57,513 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/96ecebd0336f4b52b413b679253e7892 to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/96ecebd0336f4b52b413b679253e7892 2024-11-27T16:23:57,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/ec1429db38d946f68b9480a5c60509c8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/ec1429db38d946f68b9480a5c60509c8 2024-11-27T16:23:57,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/93c25eecab824b60ac3606eb0fa49336 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/93c25eecab824b60ac3606eb0fa49336 2024-11-27T16:23:57,515 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/8dc9dbb471914717a1ebfdd9f5cd5e28 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/8dc9dbb471914717a1ebfdd9f5cd5e28 2024-11-27T16:23:57,516 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/2b036d1d0521450187f99bf2a2fdb1fe to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/2b036d1d0521450187f99bf2a2fdb1fe 2024-11-27T16:23:57,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/053d7c73f999431fb8543d70083b0104 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/053d7c73f999431fb8543d70083b0104 2024-11-27T16:23:57,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/086e5fbcee744012880e1c9cb5dcd58c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/086e5fbcee744012880e1c9cb5dcd58c 2024-11-27T16:23:57,518 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/728684a847e34415ae1e112b3a970fbf to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/728684a847e34415ae1e112b3a970fbf 2024-11-27T16:23:57,519 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/641461dfb3b74e5eb5a38137d18cac3e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/641461dfb3b74e5eb5a38137d18cac3e 2024-11-27T16:23:57,520 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/72f4e26c83e7449492d48d7400a17e12 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/72f4e26c83e7449492d48d7400a17e12 2024-11-27T16:23:57,520 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/a15a3764e81c4d0098b90cc62741a466 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/a15a3764e81c4d0098b90cc62741a466 2024-11-27T16:23:57,522 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/1e33543b4c6847a2af0b6229f9d12100 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/1e33543b4c6847a2af0b6229f9d12100 2024-11-27T16:23:57,523 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/eb129477e78e460dad8355f263f9816f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/eb129477e78e460dad8355f263f9816f 2024-11-27T16:23:57,523 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/3fa25bc9553e465aa8a647df29ce0db8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/3fa25bc9553e465aa8a647df29ce0db8 2024-11-27T16:23:57,524 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/68a11f410ea947e9b5ddbac368285266, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/08b218d73e7546bdb57e490353da8f0d, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/10740185d1fc4a74aaa4923736c6cbb3, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/db1b28dff7044aa4a7a18eac5233176c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/bc48ebd121ab4a4a9a580ad5583c92a5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/bdcf10f210da4969baec322bf6766b23, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/dc10e3811ab6475488f57fcf8d362cc1, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5985980d92c64f4fa8e06af189374428, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/7ade542436be4518bcbccaff01b2cb2c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/9f6f084fdaa34cedbfc3c431f490294a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/26c678bfa0ca4ba28da987d1fdefdc3e, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a578bffa410647e4bc7fbe4190a56f41, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a17ad94addac4d4192eb6c5373eedf05, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/39a23149aec6486785fd8db94cc5275a, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5ba25a2ebf7148458f5b05109491c405, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/ad013440164b46c6a3148fa28e15e5b3, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5df8f7b1208d4435a6df8a9353d63168, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/d4db919c21954b5ea76fda64a178df8b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/15c405ff2fa84cffbb247bf59339b576, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/7da9a7332db042acb5b9bd3828bedf1b, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/d4445e0d37604ee398b6f41675091827, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a16323b3417c4865a927263cb158b876, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5421e2ad3b43414292038387b23d9297, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/1643bc3e8acb4886ba8b50784fec9fc1] to archive 2024-11-27T16:23:57,525 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T16:23:57,526 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/68a11f410ea947e9b5ddbac368285266 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/68a11f410ea947e9b5ddbac368285266 2024-11-27T16:23:57,527 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/08b218d73e7546bdb57e490353da8f0d to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/08b218d73e7546bdb57e490353da8f0d 2024-11-27T16:23:57,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/10740185d1fc4a74aaa4923736c6cbb3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/10740185d1fc4a74aaa4923736c6cbb3 2024-11-27T16:23:57,529 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/db1b28dff7044aa4a7a18eac5233176c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/db1b28dff7044aa4a7a18eac5233176c 2024-11-27T16:23:57,529 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/bc48ebd121ab4a4a9a580ad5583c92a5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/bc48ebd121ab4a4a9a580ad5583c92a5 2024-11-27T16:23:57,530 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/bdcf10f210da4969baec322bf6766b23 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/bdcf10f210da4969baec322bf6766b23 2024-11-27T16:23:57,531 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/dc10e3811ab6475488f57fcf8d362cc1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/dc10e3811ab6475488f57fcf8d362cc1 2024-11-27T16:23:57,532 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5985980d92c64f4fa8e06af189374428 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5985980d92c64f4fa8e06af189374428 2024-11-27T16:23:57,533 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/7ade542436be4518bcbccaff01b2cb2c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/7ade542436be4518bcbccaff01b2cb2c 2024-11-27T16:23:57,534 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/9f6f084fdaa34cedbfc3c431f490294a to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/9f6f084fdaa34cedbfc3c431f490294a 2024-11-27T16:23:57,534 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/26c678bfa0ca4ba28da987d1fdefdc3e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/26c678bfa0ca4ba28da987d1fdefdc3e 2024-11-27T16:23:57,535 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a578bffa410647e4bc7fbe4190a56f41 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a578bffa410647e4bc7fbe4190a56f41 2024-11-27T16:23:57,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a17ad94addac4d4192eb6c5373eedf05 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a17ad94addac4d4192eb6c5373eedf05 2024-11-27T16:23:57,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/39a23149aec6486785fd8db94cc5275a to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/39a23149aec6486785fd8db94cc5275a 2024-11-27T16:23:57,537 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5ba25a2ebf7148458f5b05109491c405 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5ba25a2ebf7148458f5b05109491c405 2024-11-27T16:23:57,538 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/ad013440164b46c6a3148fa28e15e5b3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/ad013440164b46c6a3148fa28e15e5b3 2024-11-27T16:23:57,538 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5df8f7b1208d4435a6df8a9353d63168 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5df8f7b1208d4435a6df8a9353d63168 2024-11-27T16:23:57,539 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/d4db919c21954b5ea76fda64a178df8b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/d4db919c21954b5ea76fda64a178df8b 2024-11-27T16:23:57,540 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/15c405ff2fa84cffbb247bf59339b576 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/15c405ff2fa84cffbb247bf59339b576 2024-11-27T16:23:57,540 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/7da9a7332db042acb5b9bd3828bedf1b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/7da9a7332db042acb5b9bd3828bedf1b 2024-11-27T16:23:57,541 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/d4445e0d37604ee398b6f41675091827 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/d4445e0d37604ee398b6f41675091827 2024-11-27T16:23:57,542 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a16323b3417c4865a927263cb158b876 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a16323b3417c4865a927263cb158b876 2024-11-27T16:23:57,543 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5421e2ad3b43414292038387b23d9297 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/5421e2ad3b43414292038387b23d9297 2024-11-27T16:23:57,543 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/1643bc3e8acb4886ba8b50784fec9fc1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/1643bc3e8acb4886ba8b50784fec9fc1 2024-11-27T16:23:57,544 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/6f7d6b56624f4e40899d7e345b3bb59f, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/76618a60baee4a28b4696aac925b23c8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/fd1900434fdb47c59b945b8c2c3c3ce2, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/1cf56d1afb2a44568b1fabaedc38ac2c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/b17a7914226140b8b0be8c050544126c, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/a9ff9b6cfbdf4703a428cfd836eb52e0, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/f8c178e51e39487f8de9e67a25267aae, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/3930a0ab195c4af0ba9758e11db068a8, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/c6a8bbe7286e4a2b8cd07051015dfacb, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/871f103f757a40458fcb6ee1c6798295, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/724cef2dfb224cea98ff09ca46a26e49, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/742d8ff7a1ca4a70bbd8f65f3a3d90ad, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/b1009ad8097649b88ce9ee2dbd37a1fa, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/50dc7945996c495d97a5975aaa63ebee, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/74fdaa47f7f242afb68206037985e132, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/3f551bcd13b34cdfb73fd8a379a677a5, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/59b174b28cd64bb59b40c26878ae0353, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/6807a17188454156994f7ec66616e590, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/647331dde44b47ef8f4165c366037971, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/1ca077f1e2af410fa3dbabf635a6b0f7, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/ca24ad61ede74714a8c6dbdfcfc213f4, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/252a4daa85e74c329ad71533894d5298, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/2dfaf41f009c48c9b0520c21d13a4a47] to archive 2024-11-27T16:23:57,545 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
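A minimal sketch of the path mapping visible in the HFileArchiver entries above, assuming only what the log shows: a compacted store file under <root>/data/<namespace>/<table>/<region>/<family>/<file> is re-rooted to the same relative location under <root>/archive/data/. The class and method names (ArchivePathSketch, toArchivePath) are hypothetical and this is not HBase's actual HFileArchiver code.

public class ArchivePathSketch {

    // Derives the archive location for a store file path, assuming the layout seen in the log.
    static String toArchivePath(String rootDir, String storeFilePath) {
        String dataPrefix = rootDir + "/data/";
        if (!storeFilePath.startsWith(dataPrefix)) {
            throw new IllegalArgumentException("Not under the data directory: " + storeFilePath);
        }
        // Keep the <namespace>/<table>/<region>/<family>/<file> suffix, re-rooted under archive/data/.
        return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59";
        String storeFile = root + "/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/68a11f410ea947e9b5ddbac368285266";
        // Prints the same archive path that the HFileArchiver(596) entry reports for this file.
        System.out.println(toArchivePath(root, storeFile));
    }
}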
2024-11-27T16:23:57,546 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/6f7d6b56624f4e40899d7e345b3bb59f to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/6f7d6b56624f4e40899d7e345b3bb59f 2024-11-27T16:23:57,546 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/76618a60baee4a28b4696aac925b23c8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/76618a60baee4a28b4696aac925b23c8 2024-11-27T16:23:57,547 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/fd1900434fdb47c59b945b8c2c3c3ce2 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/fd1900434fdb47c59b945b8c2c3c3ce2 2024-11-27T16:23:57,548 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/1cf56d1afb2a44568b1fabaedc38ac2c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/1cf56d1afb2a44568b1fabaedc38ac2c 2024-11-27T16:23:57,549 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/b17a7914226140b8b0be8c050544126c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/b17a7914226140b8b0be8c050544126c 2024-11-27T16:23:57,549 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/a9ff9b6cfbdf4703a428cfd836eb52e0 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/a9ff9b6cfbdf4703a428cfd836eb52e0 2024-11-27T16:23:57,550 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/f8c178e51e39487f8de9e67a25267aae to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/f8c178e51e39487f8de9e67a25267aae 2024-11-27T16:23:57,551 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/3930a0ab195c4af0ba9758e11db068a8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/3930a0ab195c4af0ba9758e11db068a8 2024-11-27T16:23:57,552 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/c6a8bbe7286e4a2b8cd07051015dfacb to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/c6a8bbe7286e4a2b8cd07051015dfacb 2024-11-27T16:23:57,553 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/871f103f757a40458fcb6ee1c6798295 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/871f103f757a40458fcb6ee1c6798295 2024-11-27T16:23:57,553 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/724cef2dfb224cea98ff09ca46a26e49 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/724cef2dfb224cea98ff09ca46a26e49 2024-11-27T16:23:57,554 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/742d8ff7a1ca4a70bbd8f65f3a3d90ad to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/742d8ff7a1ca4a70bbd8f65f3a3d90ad 2024-11-27T16:23:57,555 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/b1009ad8097649b88ce9ee2dbd37a1fa to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/b1009ad8097649b88ce9ee2dbd37a1fa 2024-11-27T16:23:57,555 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/50dc7945996c495d97a5975aaa63ebee to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/50dc7945996c495d97a5975aaa63ebee 2024-11-27T16:23:57,556 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/74fdaa47f7f242afb68206037985e132 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/74fdaa47f7f242afb68206037985e132 2024-11-27T16:23:57,557 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/3f551bcd13b34cdfb73fd8a379a677a5 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/3f551bcd13b34cdfb73fd8a379a677a5 2024-11-27T16:23:57,558 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/59b174b28cd64bb59b40c26878ae0353 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/59b174b28cd64bb59b40c26878ae0353 2024-11-27T16:23:57,558 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/6807a17188454156994f7ec66616e590 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/6807a17188454156994f7ec66616e590 2024-11-27T16:23:57,559 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/647331dde44b47ef8f4165c366037971 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/647331dde44b47ef8f4165c366037971 2024-11-27T16:23:57,560 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/1ca077f1e2af410fa3dbabf635a6b0f7 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/1ca077f1e2af410fa3dbabf635a6b0f7 2024-11-27T16:23:57,560 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/ca24ad61ede74714a8c6dbdfcfc213f4 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/ca24ad61ede74714a8c6dbdfcfc213f4 2024-11-27T16:23:57,561 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/252a4daa85e74c329ad71533894d5298 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/252a4daa85e74c329ad71533894d5298 2024-11-27T16:23:57,562 DEBUG [StoreCloser-TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/2dfaf41f009c48c9b0520c21d13a4a47 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/2dfaf41f009c48c9b0520c21d13a4a47 2024-11-27T16:23:57,565 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/recovered.edits/376.seqid, newMaxSeqId=376, maxSeqId=4 2024-11-27T16:23:57,565 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b. 
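The WALSplitUtil(409) entry above records that the clean close wrote a marker file named 376.seqid under the region's recovered.edits directory with newMaxSeqId=376. A minimal sketch, assuming only that "<n>.seqid" naming pattern (the class and helper below are hypothetical, not HBase code), of how the sequence id can be read back from such a file name:

public class SeqIdMarkerSketch {

    // Parses the sequence id out of a "<n>.seqid" marker file name (hypothetical helper).
    static long parseSeqId(String fileName) {
        if (!fileName.endsWith(".seqid")) {
            throw new IllegalArgumentException("Not a seqid marker: " + fileName);
        }
        return Long.parseLong(fileName.substring(0, fileName.length() - ".seqid".length()));
    }

    public static void main(String[] args) {
        // Matches the "Wrote file=.../recovered.edits/376.seqid, newMaxSeqId=376" entry above.
        System.out.println(parseSeqId("376.seqid")); // prints 376
    }
}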
2024-11-27T16:23:57,565 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1635): Region close journal for 771007c821cc85e485653aceb22dba4b: 2024-11-27T16:23:57,567 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] handler.UnassignRegionHandler(170): Closed 771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,567 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=173 updating hbase:meta row=771007c821cc85e485653aceb22dba4b, regionState=CLOSED 2024-11-27T16:23:57,569 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-27T16:23:57,569 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; CloseRegionProcedure 771007c821cc85e485653aceb22dba4b, server=7b191dec6496,44169,1732724452967 in 1.8610 sec 2024-11-27T16:23:57,570 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=173, resume processing ppid=172 2024-11-27T16:23:57,570 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, ppid=172, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=771007c821cc85e485653aceb22dba4b, UNASSIGN in 1.8640 sec 2024-11-27T16:23:57,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-27T16:23:57,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8660 sec 2024-11-27T16:23:57,571 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732724637571"}]},"ts":"1732724637571"} 2024-11-27T16:23:57,572 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-27T16:23:57,575 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-27T16:23:57,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8770 sec 2024-11-27T16:23:57,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-27T16:23:57,804 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-27T16:23:57,805 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-27T16:23:57,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:57,806 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=175, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:57,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-27T16:23:57,807 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=175, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:57,808 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,809 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C, FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/recovered.edits] 2024-11-27T16:23:57,811 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/6d5cd9f7237641f0a69b927646687fa1 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/6d5cd9f7237641f0a69b927646687fa1 2024-11-27T16:23:57,812 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/716d25e8896c4ff6b81e1150d28f004e to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/716d25e8896c4ff6b81e1150d28f004e 2024-11-27T16:23:57,813 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/adbd4fcb7c644d92a9690f47d2a16c0c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/A/adbd4fcb7c644d92a9690f47d2a16c0c 2024-11-27T16:23:57,815 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/1ac7319ba0fc41f7afbfae6b771623e3 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/1ac7319ba0fc41f7afbfae6b771623e3 2024-11-27T16:23:57,816 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/2420c1a2d94349e885b8b53a3999a2be to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/2420c1a2d94349e885b8b53a3999a2be 
2024-11-27T16:23:57,816 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a2eefdd3272d4c92b832305ecab3d0b8 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/B/a2eefdd3272d4c92b832305ecab3d0b8 2024-11-27T16:23:57,818 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/44dc38ac9aab4ce6ae59d2141ef73890 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/44dc38ac9aab4ce6ae59d2141ef73890 2024-11-27T16:23:57,819 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/63ad31bd244b47d5b111a6afdb662f60 to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/63ad31bd244b47d5b111a6afdb662f60 2024-11-27T16:23:57,820 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/bc7e8946091e4fd3b536f1ee42e3908c to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/C/bc7e8946091e4fd3b536f1ee42e3908c 2024-11-27T16:23:57,822 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/recovered.edits/376.seqid to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b/recovered.edits/376.seqid 2024-11-27T16:23:57,822 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/default/TestAcidGuarantees/771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,823 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-27T16:23:57,823 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-27T16:23:57,823 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-27T16:23:57,825 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270a53d7b5652444cc9149786f92255ac5_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270a53d7b5652444cc9149786f92255ac5_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,826 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112716bad35b199540f8b04fc3a86fa4a418_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112716bad35b199540f8b04fc3a86fa4a418_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,827 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112739e7121eea6543ba9eec09a72290f354_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112739e7121eea6543ba9eec09a72290f354_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,828 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273d86bdf4119e436a890db5f14dc2b7f2_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273d86bdf4119e436a890db5f14dc2b7f2_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,829 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411274ba6adbfa6024ed284dd9108fdb7e251_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411274ba6adbfa6024ed284dd9108fdb7e251_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,829 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127594e8e1615354564af91972d5eb3cc31_771007c821cc85e485653aceb22dba4b to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127594e8e1615354564af91972d5eb3cc31_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,830 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275a22cf24bea04b0c85baac7d496f236f_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275a22cf24bea04b0c85baac7d496f236f_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,831 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127684c7031bc9e4f33b5aec3ac36b52d8d_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127684c7031bc9e4f33b5aec3ac36b52d8d_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,832 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127685040b6b0d74871a8a097cca7437911_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127685040b6b0d74871a8a097cca7437911_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,833 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279d5a5cbf3f724a23b45236dbe65ea441_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279d5a5cbf3f724a23b45236dbe65ea441_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,834 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a5559ade3a18460a9f83f83f69396855_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a5559ade3a18460a9f83f83f69396855_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,835 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b221a84fa35241e4a8509812133c396e_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b221a84fa35241e4a8509812133c396e_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,835 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c4cbe4e818f94da09881928988aef8ef_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c4cbe4e818f94da09881928988aef8ef_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,836 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127ca3c36ee16aa4a04ad9035bbe29bdab1_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127ca3c36ee16aa4a04ad9035bbe29bdab1_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,837 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127d4f545e329b947c9b50244ac2169408f_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127d4f545e329b947c9b50244ac2169408f_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,838 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127d6e12cc70b04452cbe0b0b3966d59787_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127d6e12cc70b04452cbe0b0b3966d59787_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,839 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127f5a0755802e248b192d93b72ce6b0b33_771007c821cc85e485653aceb22dba4b to 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127f5a0755802e248b192d93b72ce6b0b33_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,839 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127faa53db3dddf4057b66b6fa116accc8c_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127faa53db3dddf4057b66b6fa116accc8c_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,840 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127fe0e3dfc262f4486a086370f9ccc259b_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127fe0e3dfc262f4486a086370f9ccc259b_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,841 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127fe5eb693da834dc7a15bc0567070e659_771007c821cc85e485653aceb22dba4b to hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127fe5eb693da834dc7a15bc0567070e659_771007c821cc85e485653aceb22dba4b 2024-11-27T16:23:57,841 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-27T16:23:57,843 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=175, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:57,845 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-27T16:23:57,846 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-27T16:23:57,847 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=175, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:57,847 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-27T16:23:57,847 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732724637847"}]},"ts":"9223372036854775807"} 2024-11-27T16:23:57,848 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-27T16:23:57,848 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 771007c821cc85e485653aceb22dba4b, NAME => 'TestAcidGuarantees,,1732724604414.771007c821cc85e485653aceb22dba4b.', STARTKEY => '', ENDKEY => ''}] 2024-11-27T16:23:57,848 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-27T16:23:57,848 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732724637848"}]},"ts":"9223372036854775807"} 2024-11-27T16:23:57,849 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-27T16:23:57,851 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=175, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T16:23:57,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 46 msec 2024-11-27T16:23:57,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-27T16:23:57,908 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-27T16:23:57,917 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=236 (was 239), OpenFileDescriptor=443 (was 450), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=782 (was 831), ProcessCount=11 (was 11), AvailableMemoryMB=3698 (was 3798) 2024-11-27T16:23:57,917 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-27T16:23:57,917 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-27T16:23:57,917 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0870ca2a to 127.0.0.1:51088 2024-11-27T16:23:57,917 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:57,917 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-27T16:23:57,917 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=572881020, stopped=false 2024-11-27T16:23:57,917 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=7b191dec6496,41377,1732724452229 2024-11-27T16:23:57,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-27T16:23:57,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/running 2024-11-27T16:23:57,919 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-27T16:23:57,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:23:57,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:23:57,919 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:57,920 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7b191dec6496,44169,1732724452967' ***** 2024-11-27T16:23:57,920 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-27T16:23:57,920 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T16:23:57,920 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T16:23:57,920 INFO [RS:0;7b191dec6496:44169 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-27T16:23:57,920 INFO [RS:0;7b191dec6496:44169 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-27T16:23:57,920 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-27T16:23:57,920 INFO [RS:0;7b191dec6496:44169 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-27T16:23:57,921 INFO [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(3579): Received CLOSE for fb56c9d94acc1b64bf2472d65ab81174 2024-11-27T16:23:57,921 INFO [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1224): stopping server 7b191dec6496,44169,1732724452967 2024-11-27T16:23:57,921 DEBUG [RS:0;7b191dec6496:44169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:57,921 INFO [RS:0;7b191dec6496:44169 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-27T16:23:57,921 INFO [RS:0;7b191dec6496:44169 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-27T16:23:57,921 INFO [RS:0;7b191dec6496:44169 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-27T16:23:57,921 INFO [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-27T16:23:57,921 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing fb56c9d94acc1b64bf2472d65ab81174, disabling compactions & flushes 2024-11-27T16:23:57,921 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174. 
2024-11-27T16:23:57,921 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174. 2024-11-27T16:23:57,921 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174. after waiting 0 ms 2024-11-27T16:23:57,921 INFO [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-27T16:23:57,921 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174. 2024-11-27T16:23:57,922 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1603): Online Regions={fb56c9d94acc1b64bf2472d65ab81174=hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174., 1588230740=hbase:meta,,1.1588230740} 2024-11-27T16:23:57,922 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing fb56c9d94acc1b64bf2472d65ab81174 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-27T16:23:57,922 DEBUG [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-27T16:23:57,922 INFO [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-27T16:23:57,922 DEBUG [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-27T16:23:57,922 DEBUG [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-27T16:23:57,922 DEBUG [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-27T16:23:57,922 INFO [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-27T16:23:57,925 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, fb56c9d94acc1b64bf2472d65ab81174 2024-11-27T16:23:57,937 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/namespace/fb56c9d94acc1b64bf2472d65ab81174/.tmp/info/b29d79389c224d26bde4637bdc451683 is 45, key is default/info:d/1732724457571/Put/seqid=0 2024-11-27T16:23:57,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742586_1762 (size=5037) 2024-11-27T16:23:57,946 DEBUG [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/.tmp/info/778ecbedb0294f1abe79c8fca57d043a is 143, key is 
hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174./info:regioninfo/1732724457452/Put/seqid=0 2024-11-27T16:23:57,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742587_1763 (size=7725) 2024-11-27T16:23:57,977 INFO [regionserver/7b191dec6496:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-27T16:23:58,125 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, fb56c9d94acc1b64bf2472d65ab81174 2024-11-27T16:23:58,325 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, fb56c9d94acc1b64bf2472d65ab81174 2024-11-27T16:23:58,331 INFO [regionserver/7b191dec6496:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-27T16:23:58,332 INFO [regionserver/7b191dec6496:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-27T16:23:58,341 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/namespace/fb56c9d94acc1b64bf2472d65ab81174/.tmp/info/b29d79389c224d26bde4637bdc451683 2024-11-27T16:23:58,344 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/namespace/fb56c9d94acc1b64bf2472d65ab81174/.tmp/info/b29d79389c224d26bde4637bdc451683 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/namespace/fb56c9d94acc1b64bf2472d65ab81174/info/b29d79389c224d26bde4637bdc451683 2024-11-27T16:23:58,347 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/namespace/fb56c9d94acc1b64bf2472d65ab81174/info/b29d79389c224d26bde4637bdc451683, entries=2, sequenceid=6, filesize=4.9 K 2024-11-27T16:23:58,348 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for fb56c9d94acc1b64bf2472d65ab81174 in 427ms, sequenceid=6, compaction requested=false 2024-11-27T16:23:58,349 INFO [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/.tmp/info/778ecbedb0294f1abe79c8fca57d043a 2024-11-27T16:23:58,350 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/namespace/fb56c9d94acc1b64bf2472d65ab81174/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-27T16:23:58,351 INFO [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174. 
2024-11-27T16:23:58,351 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for fb56c9d94acc1b64bf2472d65ab81174: 2024-11-27T16:23:58,351 DEBUG [RS_CLOSE_REGION-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732724456218.fb56c9d94acc1b64bf2472d65ab81174. 2024-11-27T16:23:58,366 DEBUG [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/.tmp/rep_barrier/b8a6484ac9a04a2da9edf48cd21d7739 is 102, key is TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9./rep_barrier:/1732724489231/DeleteFamily/seqid=0 2024-11-27T16:23:58,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742588_1764 (size=6025) 2024-11-27T16:23:58,526 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-27T16:23:58,726 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-27T16:23:58,769 INFO [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/.tmp/rep_barrier/b8a6484ac9a04a2da9edf48cd21d7739 2024-11-27T16:23:58,787 DEBUG [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/.tmp/table/979c1701450f4132a95a23338bcadbb6 is 96, key is TestAcidGuarantees,,1732724457728.d498a187112eb3635082ffac2dfb4cf9./table:/1732724489231/DeleteFamily/seqid=0 2024-11-27T16:23:58,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742589_1765 (size=5942) 2024-11-27T16:23:58,926 INFO [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-27T16:23:58,926 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-27T16:23:58,926 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-27T16:23:59,126 DEBUG [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-27T16:23:59,190 INFO [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/.tmp/table/979c1701450f4132a95a23338bcadbb6 2024-11-27T16:23:59,193 DEBUG [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/.tmp/info/778ecbedb0294f1abe79c8fca57d043a as 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/info/778ecbedb0294f1abe79c8fca57d043a 2024-11-27T16:23:59,195 INFO [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/info/778ecbedb0294f1abe79c8fca57d043a, entries=22, sequenceid=93, filesize=7.5 K 2024-11-27T16:23:59,196 DEBUG [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/.tmp/rep_barrier/b8a6484ac9a04a2da9edf48cd21d7739 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/rep_barrier/b8a6484ac9a04a2da9edf48cd21d7739 2024-11-27T16:23:59,198 INFO [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/rep_barrier/b8a6484ac9a04a2da9edf48cd21d7739, entries=6, sequenceid=93, filesize=5.9 K 2024-11-27T16:23:59,199 DEBUG [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/.tmp/table/979c1701450f4132a95a23338bcadbb6 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/table/979c1701450f4132a95a23338bcadbb6 2024-11-27T16:23:59,201 INFO [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/table/979c1701450f4132a95a23338bcadbb6, entries=9, sequenceid=93, filesize=5.8 K 2024-11-27T16:23:59,201 INFO [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1279ms, sequenceid=93, compaction requested=false 2024-11-27T16:23:59,205 DEBUG [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-27T16:23:59,205 DEBUG [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-27T16:23:59,205 INFO [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-27T16:23:59,205 DEBUG [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-27T16:23:59,206 DEBUG [RS_CLOSE_META-regionserver/7b191dec6496:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-27T16:23:59,327 INFO [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1250): stopping server 
7b191dec6496,44169,1732724452967; all regions closed. 2024-11-27T16:23:59,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741834_1010 (size=26050) 2024-11-27T16:23:59,332 DEBUG [RS:0;7b191dec6496:44169 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/oldWALs 2024-11-27T16:23:59,332 INFO [RS:0;7b191dec6496:44169 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 7b191dec6496%2C44169%2C1732724452967.meta:.meta(num 1732724455952) 2024-11-27T16:23:59,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741833_1009 (size=17754446) 2024-11-27T16:23:59,336 DEBUG [RS:0;7b191dec6496:44169 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/oldWALs 2024-11-27T16:23:59,336 INFO [RS:0;7b191dec6496:44169 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 7b191dec6496%2C44169%2C1732724452967:(num 1732724455472) 2024-11-27T16:23:59,336 DEBUG [RS:0;7b191dec6496:44169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:59,336 INFO [RS:0;7b191dec6496:44169 {}] regionserver.LeaseManager(133): Closed leases 2024-11-27T16:23:59,336 INFO [RS:0;7b191dec6496:44169 {}] hbase.ChoreService(370): Chore service for: regionserver/7b191dec6496:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-27T16:23:59,336 INFO [regionserver/7b191dec6496:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-27T16:23:59,337 INFO [RS:0;7b191dec6496:44169 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44169 2024-11-27T16:23:59,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7b191dec6496,44169,1732724452967 2024-11-27T16:23:59,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-27T16:23:59,341 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7b191dec6496,44169,1732724452967] 2024-11-27T16:23:59,342 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7b191dec6496,44169,1732724452967; numProcessing=1 2024-11-27T16:23:59,343 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7b191dec6496,44169,1732724452967 already deleted, retry=false 2024-11-27T16:23:59,343 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7b191dec6496,44169,1732724452967 expired; onlineServers=0 2024-11-27T16:23:59,343 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7b191dec6496,41377,1732724452229' ***** 2024-11-27T16:23:59,343 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-27T16:23:59,343 DEBUG [M:0;7b191dec6496:41377 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2234ae6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b191dec6496/172.17.0.2:0 2024-11-27T16:23:59,343 INFO [M:0;7b191dec6496:41377 {}] regionserver.HRegionServer(1224): stopping server 7b191dec6496,41377,1732724452229 2024-11-27T16:23:59,343 INFO [M:0;7b191dec6496:41377 {}] regionserver.HRegionServer(1250): stopping server 7b191dec6496,41377,1732724452229; all regions closed. 2024-11-27T16:23:59,343 DEBUG [M:0;7b191dec6496:41377 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T16:23:59,343 DEBUG [M:0;7b191dec6496:41377 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-27T16:23:59,343 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-27T16:23:59,343 DEBUG [M:0;7b191dec6496:41377 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-27T16:23:59,343 DEBUG [master/7b191dec6496:0:becomeActiveMaster-HFileCleaner.large.0-1732724455109 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b191dec6496:0:becomeActiveMaster-HFileCleaner.large.0-1732724455109,5,FailOnTimeoutGroup] 2024-11-27T16:23:59,343 DEBUG [master/7b191dec6496:0:becomeActiveMaster-HFileCleaner.small.0-1732724455112 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b191dec6496:0:becomeActiveMaster-HFileCleaner.small.0-1732724455112,5,FailOnTimeoutGroup] 2024-11-27T16:23:59,344 INFO [M:0;7b191dec6496:41377 {}] hbase.ChoreService(370): Chore service for: master/7b191dec6496:0 had [] on shutdown 2024-11-27T16:23:59,344 DEBUG [M:0;7b191dec6496:41377 {}] master.HMaster(1733): Stopping service threads 2024-11-27T16:23:59,344 INFO [M:0;7b191dec6496:41377 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-27T16:23:59,344 ERROR [M:0;7b191dec6496:41377 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-27T16:23:59,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-27T16:23:59,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T16:23:59,345 INFO [M:0;7b191dec6496:41377 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-27T16:23:59,345 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-27T16:23:59,345 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-27T16:23:59,345 DEBUG [M:0;7b191dec6496:41377 {}] zookeeper.ZKUtil(347): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-27T16:23:59,345 WARN [M:0;7b191dec6496:41377 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-27T16:23:59,345 INFO [M:0;7b191dec6496:41377 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-27T16:23:59,345 INFO [M:0;7b191dec6496:41377 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-27T16:23:59,345 DEBUG [M:0;7b191dec6496:41377 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-27T16:23:59,345 INFO [M:0;7b191dec6496:41377 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T16:23:59,345 DEBUG [M:0;7b191dec6496:41377 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T16:23:59,345 DEBUG [M:0;7b191dec6496:41377 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-27T16:23:59,345 DEBUG [M:0;7b191dec6496:41377 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T16:23:59,345 INFO [M:0;7b191dec6496:41377 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=793.77 KB heapSize=978.31 KB 2024-11-27T16:23:59,360 DEBUG [M:0;7b191dec6496:41377 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1745626033c1407980c170e3937eeb95 is 82, key is hbase:meta,,1/info:regioninfo/1732724456103/Put/seqid=0 2024-11-27T16:23:59,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742590_1766 (size=5672) 2024-11-27T16:23:59,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T16:23:59,442 INFO [RS:0;7b191dec6496:44169 {}] regionserver.HRegionServer(1307): Exiting; stopping=7b191dec6496,44169,1732724452967; zookeeper connection closed. 
2024-11-27T16:23:59,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44169-0x10039c8a1050001, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T16:23:59,442 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4e337422 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4e337422 2024-11-27T16:23:59,443 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-27T16:23:59,764 INFO [M:0;7b191dec6496:41377 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2288 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1745626033c1407980c170e3937eeb95 2024-11-27T16:23:59,784 DEBUG [M:0;7b191dec6496:41377 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7ef19eeae0434395b22b79062e00f07e is 2278, key is \x00\x00\x00\x00\x00\x00\x00\x98/proc:d/1732724606058/Put/seqid=0 2024-11-27T16:23:59,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742591_1767 (size=44313) 2024-11-27T16:24:00,193 INFO [M:0;7b191dec6496:41377 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=793.21 KB at sequenceid=2288 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7ef19eeae0434395b22b79062e00f07e 2024-11-27T16:24:00,196 INFO [M:0;7b191dec6496:41377 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7ef19eeae0434395b22b79062e00f07e 2024-11-27T16:24:00,219 DEBUG [M:0;7b191dec6496:41377 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b87f3448a724dd2b9f98d22ee44f4b0 is 69, key is 7b191dec6496,44169,1732724452967/rs:state/1732724455247/Put/seqid=0 2024-11-27T16:24:00,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073742592_1768 (size=5156) 2024-11-27T16:24:00,625 INFO [M:0;7b191dec6496:41377 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2288 (bloomFilter=true), to=hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b87f3448a724dd2b9f98d22ee44f4b0 2024-11-27T16:24:00,628 DEBUG [M:0;7b191dec6496:41377 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1745626033c1407980c170e3937eeb95 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1745626033c1407980c170e3937eeb95 2024-11-27T16:24:00,631 INFO [M:0;7b191dec6496:41377 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1745626033c1407980c170e3937eeb95, entries=8, sequenceid=2288, filesize=5.5 K 2024-11-27T16:24:00,631 DEBUG [M:0;7b191dec6496:41377 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7ef19eeae0434395b22b79062e00f07e as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7ef19eeae0434395b22b79062e00f07e 2024-11-27T16:24:00,633 INFO [M:0;7b191dec6496:41377 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7ef19eeae0434395b22b79062e00f07e 2024-11-27T16:24:00,633 INFO [M:0;7b191dec6496:41377 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7ef19eeae0434395b22b79062e00f07e, entries=175, sequenceid=2288, filesize=43.3 K 2024-11-27T16:24:00,634 DEBUG [M:0;7b191dec6496:41377 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b87f3448a724dd2b9f98d22ee44f4b0 as hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9b87f3448a724dd2b9f98d22ee44f4b0 2024-11-27T16:24:00,636 INFO [M:0;7b191dec6496:41377 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34065/user/jenkins/test-data/d121e210-2d8b-c76f-56ab-c2e334ab5d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9b87f3448a724dd2b9f98d22ee44f4b0, entries=1, sequenceid=2288, filesize=5.0 K 2024-11-27T16:24:00,637 INFO [M:0;7b191dec6496:41377 {}] regionserver.HRegion(3040): Finished flush of dataSize ~793.77 KB/812821, heapSize ~978.02 KB/1001488, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1292ms, sequenceid=2288, compaction requested=false 2024-11-27T16:24:00,638 INFO [M:0;7b191dec6496:41377 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T16:24:00,638 DEBUG [M:0;7b191dec6496:41377 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-27T16:24:00,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46329 is added to blk_1073741830_1006 (size=962764) 2024-11-27T16:24:00,640 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-27T16:24:00,640 INFO [M:0;7b191dec6496:41377 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-11-27T16:24:00,641 INFO [M:0;7b191dec6496:41377 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41377 2024-11-27T16:24:00,642 DEBUG [M:0;7b191dec6496:41377 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/7b191dec6496,41377,1732724452229 already deleted, retry=false 2024-11-27T16:24:00,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T16:24:00,744 INFO [M:0;7b191dec6496:41377 {}] regionserver.HRegionServer(1307): Exiting; stopping=7b191dec6496,41377,1732724452229; zookeeper connection closed. 2024-11-27T16:24:00,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x10039c8a1050000, quorum=127.0.0.1:51088, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T16:24:00,749 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@29607158{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T16:24:00,752 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@76b7aca8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T16:24:00,752 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T16:24:00,752 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74536f23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T16:24:00,752 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ac85cee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/hadoop.log.dir/,STOPPED} 2024-11-27T16:24:00,755 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-27T16:24:00,755 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T16:24:00,755 WARN [BP-632509396-172.17.0.2-1732724449322 heartbeating to localhost/127.0.0.1:34065 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T16:24:00,755 WARN [BP-632509396-172.17.0.2-1732724449322 heartbeating to localhost/127.0.0.1:34065 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-632509396-172.17.0.2-1732724449322 (Datanode Uuid 347a9132-bc61-40f9-b233-195ce5f257aa) service to localhost/127.0.0.1:34065 2024-11-27T16:24:00,758 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/cluster_6cd18b6a-993a-7aed-ac6c-30d6060ee827/dfs/data/data1/current/BP-632509396-172.17.0.2-1732724449322 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T16:24:00,759 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/cluster_6cd18b6a-993a-7aed-ac6c-30d6060ee827/dfs/data/data2/current/BP-632509396-172.17.0.2-1732724449322 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T16:24:00,759 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T16:24:00,766 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6904431c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-27T16:24:00,767 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20178447{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T16:24:00,767 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T16:24:00,767 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@704acb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T16:24:00,767 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@106ffc0e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/d81050c1-1878-e772-43c1-4ef7cd483263/hadoop.log.dir/,STOPPED} 2024-11-27T16:24:00,793 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-27T16:24:00,987 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down